Example 1
def showVectorDisplacements():

    global testImage, croppedRefImage, u, v, valid, q1, umean, vmean, x, y, sxyVar, wxyVar, goodvectorsVar
    from scipy import where, compress, logical_and, median, logical_or, nan
    from pylab import resize, transpose, quiver, title, show, find, imshow, hist, figure, clf, draw, save, load, xlabel, ylabel, flipud

    mxy = 3
    wxy = int(wxyVar.get())
    sxy = int(sxyVar.get())
    goodvectors = float(goodvectorsVar.get())
    #process to find PIV-style displacements
    x, y, u, v, q1, valid = simplepiv(croppedRefImage, testImage, wxy, mxy,
                                      sxy)
    good = where(logical_and(q1 > goodvectors, valid > 0), True, False)
    umean = median(compress(good.flat, u.flat))
    vmean = median(compress(good.flat, v.flat))
    u = where(logical_or(q1 < goodvectors, valid < 0), 0, u)
    v = where(logical_or(q1 < goodvectors, valid < 0), 0, v)
    u = u - umean
    v = v - vmean
    save('vecx.out', x)
    save('vecy.out', y)
    save('vecu.out', u)
    save('vecv.out', v)
    save('vecq1.out', q1)
    save('vecvalid.out', valid)
    u = flipud(u)
    v = -flipud(v)
    quiver(x, y, u, v)
    title('Vector displacements')
    xlabel('Pixels')
    ylabel('Pixels')
    show()
    return
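Note: the bare "from scipy import where, compress, ..." and pylab's save/load used above were removed from those namespaces long ago; NumPy provides the same operations directly. A minimal sketch of the good-vector filtering step, with small hypothetical arrays standing in for the simplepiv outputs:

import numpy as np

q1 = np.array([0.9, 0.2, 0.8, 0.7])    # correlation quality per vector (hypothetical)
valid = np.array([1, 1, -1, 1])        # validity flags (hypothetical)
u = np.array([1.0, 5.0, 2.0, 1.2])     # horizontal displacements (hypothetical)
goodvectors = 0.5

# Same logic as above: keep high-quality valid vectors, subtract their median,
# and zero out the rejected ones.
good = (q1 > goodvectors) & (valid > 0)
umean = np.median(u[good])
u = np.where((q1 < goodvectors) | (valid < 0), 0, u) - umean
print(u)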
Example 2
def filter_nana(ts_data, ts_ticks):

    if ts_data.ndim > 1:
        raise ValueError("filtering-NaN is only defined for" +
                         "one dimensional time series data.")
    ts_ticks = compress(logical_not(isnan(ts_data)), ts_ticks)
    ts_data = compress(logical_not(isnan(ts_data)), ts_data)

    return ts_data, ts_ticks
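A usage sketch for filter_nana, assuming compress, logical_not, and isnan are imported from NumPy (which provides the same functions the old top-level scipy namespace re-exported):

import numpy as np
from numpy import compress, logical_not, isnan

ts_data = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
ts_ticks = np.arange(5)
data, ticks = filter_nana(ts_data, ts_ticks)
print(data)    # [1. 3. 5.]
print(ticks)   # [0 2 4]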
Example 3
 def normals_check(self):
     triangles_areas, triangles_normals = triangles_areas_normals_computation(
         self.vertexes_coord, self.triangle_vertexes,
         self.triangles_surfaces)
     self.test_normal_integral = zeros(self.S, 'd')
     for s in range(self.S):
         does_t_belongs_to_s = (self.triangles_surfaces == s)
         self.test_normal_integral[s] = sum(
             sum(
                 compress(does_t_belongs_to_s, triangles_areas, 0) *
                 compress(does_t_belongs_to_s, triangles_normals, 0), 1))
Example 4
def LambdaEstimate(Array=scipy.array,
                   Filter=True):
    #===========================================================================
    # copied from R-GenABEL.lamdaest
    #===========================================================================
    Array      = Array.astype(float)
    Estimate   = None
    Ntp        = len(Array)
    QChi2Array = None

    if(Array.max()<=1.0):
#       Convert to quantile function of PValObsArray (df=1) if input are p-values.
        QChi2Array = scipy.stats.chi2.isf(Array,
                                          1)
    else:
        QChi2Array = Array

    if(Filter):
        FilterArray        = (QChi2Array>=1.0e-8)
        QChi2FilteredArray = scipy.compress(FilterArray,QChi2Array)
    else:
        QChi2FilteredArray = QChi2Array
    QChi2FilteredArray.sort()

    PPointsArray = GetPPointsArray(len(QChi2FilteredArray))
    PPointsArray = scipy.sort(scipy.stats.chi2.ppf(PPointsArray,1)) # df=1

    FilterArray          = (PPointsArray!=0.0)
    FilterArray         *= (QChi2FilteredArray!=0.0)
    PPointsArray         = scipy.compress(FilterArray,PPointsArray)
    QChi2FilteredArray   = scipy.compress(FilterArray,QChi2FilteredArray)

#   Fit PPointsArray,QChi2FilteredArray to the linear model.
    P0           = [1.0]
    PBest        = scipy.optimize.leastsq(Residuals,
                                          P0,
                                          args=(PPointsArray,QChi2FilteredArray),
                                          full_output=1,
                                          maxfev=100)
    Estimate = None
    if(type(PBest[0])==scipy.float64):
        Estimate = PBest[0]
    else:
        Estimate     = PBest[0][0]
#   Error estimation of parameter.
    Chi2 = scipy.power(PBest[2]['fvec'],2.0).sum()
    Dof  = len(QChi2FilteredArray)-len(P0)-1
    SE   = scipy.real(scipy.sqrt(PBest[1][0,0])*scipy.sqrt(Chi2/float(Dof)))

    return Estimate,SE
Example 5
    def downsample(self,freq,factor,ranges=[]):
        if shape(factor) and ranges:
#            pdb.set_trace()
            mask=CreateVariableMask(freq, ranges, factor)
        else:
            mask=CreateDownSampleMask(freq, factor)
#            mask=CreateLogMask(freq)
        dsfreq=compress(mask,freq)
        mylist=['mag','phase','coh']
        for item in mylist:
            tempvect=getattr(self,item)
            if tempvect is not None:
                tempvect=colwise(tempvect)
                setattr(self,item,compress(mask,tempvect,0))
        return dsfreq
Example 6
    def __init__(self, idx, theta, diff_matrix, log_file):
        self.log_file = log_file
        self.log("create beta form")
        self.idx = idx
        self.theta = theta
        self.theta_norm = sp.linalg.norm(theta, ord=None)
        self.diff_matrix = diff_matrix

        # Find the null space for the subsetted diff matrix
        start = time.time()
        zero_theta_idx = self._get_zero_theta_indices(diff_matrix * theta)
        u, s, v = sp.linalg.svd(diff_matrix[zero_theta_idx, :])
        self.log("SVD done %f" % (time.time() - start))
        null_mask = np.ones(v.shape[1])
        null_mask[:s.size] = s <= self.eps
        null_space = sp.compress(null_mask, v, axis=0)
        null_matrix = np.matrix(sp.transpose(null_space))
        start = time.time()
        beta, istop, itn, normr, normar, norma, conda, normx = sp.sparse.linalg.lsmr(
            null_matrix, theta.A1, atol=self.eps, btol=self.eps)
        self.log("sp.sparse.linalg.lsmr done %f, istop %d, itn %d" %
                 ((time.time() - start), istop, itn))
        self.beta = np.matrix(beta).T
        self.u = null_matrix

        # Check that we reformulated theta but it is still very close to the original theta
        # assert(res.size == 0 or res < self.eps)
        if sp.linalg.norm(self.u * self.beta - self.theta, ord=2) > self.eps:
            self.log("Warning: Reformulation is off: diff %f" %
                     sp.linalg.norm(self.u * self.beta - self.theta, ord=2))
        self.log("create beta form success")
Example 7
def nullspace(A, atol=1e-9):
    '''Compute an approximate basis for the nullspace of A using the singular
    value decomposition of `A`.

    Parameters
    ----------
    'A' = ndarray;  A should be at most 2-D.  A 1-D array with length k will be
     treated  as a 2-D with shape (1, k)
    'atol' = float; The absolute tolerance for a zero singular value.  Singular
     values smaller than `atol` are considered to be zero.

    Returns
    -------
    'ns' = ndarray; If `A` is an array with shape (m, k), then `ns` will be an
    array with shape (k, n), where n is the estimated dimension of the
    nullspace of `A`.  The columns of `ns` are a basis for the
    nullspace; each element in numpy.dot(A, ns) will be approximately
    zero.
    '''

    # singular value decomposition
    u, s, vh = sp_la.svd(A)
    null_mask = (s <= atol)
    null_space = sp.compress(null_mask, vh, axis=0)
    return sp.transpose(null_space)
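A quick sanity check of this pattern in plain NumPy (a sketch; sp_la and sp above are assumed to be scipy.linalg and the old top-level scipy namespace). Note that the unpadded mask only covers the first min(m, k) rows of vh, so for wide matrices some nullspace directions can be missed; the padded variants further down address this.

import numpy as np

A = np.array([[1.0, 2.0],
              [2.0, 4.0]])        # rank 1, so one nullspace direction
u, s, vh = np.linalg.svd(A)
ns = vh[s <= 1e-9].T              # equivalent to compress(null_mask, vh, axis=0).T
print(ns.shape)                   # (2, 1)
print(np.allclose(A @ ns, 0.0))   # True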
Example 8
def _sampling_matrix(hessian, cutoff=0, temperature=1, step_scale=1):
    # basically need SVD of hessian - singular values and eigenvectors
    # hessian = u * diag(singVals) * vh
    u, sing_vals, vh = scipy.linalg.svd(0.5 * hessian)

    # scroll through the singular values and find the ones whose inverses will
    # be huge and set them to zero also, load up the array of singular values
    # that we store
    # cutoff = (1.0/_.singVals[0])*1.0e03
    # double cutoff = _.singVals[0]*1.0e-02
    cutoff_sing_val = cutoff * max(sing_vals)

    D = 1.0 / scipy.maximum(sing_vals, cutoff_sing_val)

    ## now fill in the sampling matrix ("square root" of the Hessian)
    ## note that sqrt(D[i]) is taken here whereas Kevin took sqrt(D[j])
    ## this is because vh is the transpose of his PT -JJW
    samp_mat = scipy.transpose(vh) * scipy.sqrt(D)

    # Divide the sampling matrix by an additional factor such
    # that the expected quadratic increase in cost will be about 1.
    cutoff_vals = scipy.compress(sing_vals < cutoff_sing_val, sing_vals)
    if len(cutoff_vals):
        scale = scipy.sqrt(
            len(sing_vals) - len(cutoff_vals) +
            sum(cutoff_vals) / cutoff_sing_val)
    else:
        scale = scipy.sqrt(len(sing_vals))

    samp_mat /= scale
    samp_mat *= step_scale
    samp_mat *= scipy.sqrt(temperature)

    return samp_mat
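A minimal usage sketch under stated assumptions (a small symmetric positive-definite Hessian, cutoff=0, temperature=1, step_scale=1, and NumPy in place of the removed top-level scipy functions); a Monte Carlo proposal step is then a standard normal vector mapped through the matrix:

import numpy as np

hessian = np.array([[4.0, 1.0],
                    [1.0, 3.0]])
u, sing_vals, vh = np.linalg.svd(0.5 * hessian)
D = 1.0 / sing_vals                   # cutoff=0: no singular values clipped
samp_mat = vh.T * np.sqrt(D)          # broadcasting scales column i by sqrt(D[i])
samp_mat /= np.sqrt(len(sing_vals))   # expected quadratic cost increase ~ 1
step = samp_mat @ np.random.randn(2)  # one proposed sampling step
print(step.shape)                     # (2,)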
Example 9
def null(A, eps=1e-12):
    '''Compute a base of the null space of A.'''
    u, s, vh = np.linalg.svd(A)
    padding = max(0,np.shape(A)[1]-np.shape(s)[0])
    null_mask = np.concatenate(((s <= eps), np.ones((padding,),dtype=bool)),axis=0)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 10
    def __init__(self, idx, theta, diff_matrix, log_file):
        self.log_file = log_file
        self.log("create beta form")
        self.idx = idx
        self.theta = theta
        self.theta_norm = sp.linalg.norm(theta, ord=None)
        self.diff_matrix = diff_matrix

        # Find the null space for the subsetted diff matrix
        start = time.time()
        zero_theta_idx = self._get_zero_theta_indices(diff_matrix * theta)
        u, s, v = sp.linalg.svd(diff_matrix[zero_theta_idx,:])
        self.log("SVD done %f" % (time.time() - start))
        null_mask = np.ones(v.shape[1])
        null_mask[:s.size] = s <= self.eps
        null_space = sp.compress(null_mask, v, axis=0)
        null_matrix = np.matrix(sp.transpose(null_space))
        start = time.time()
        beta, istop, itn, normr, normar, norma, conda, normx = sp.sparse.linalg.lsmr(null_matrix, theta.A1, atol=self.eps, btol=self.eps)
        self.log("sp.sparse.linalg.lsmr done %f, istop %d, itn %d" % ((time.time() - start), istop, itn))
        self.beta = np.matrix(beta).T
        self.u = null_matrix

        # Check that we reformulated theta but it is still very close to the original theta
        # assert(res.size == 0 or res < self.eps)
        if sp.linalg.norm(self.u * self.beta - self.theta, ord=2) > self.eps:
            self.log("Warning: Reformulation is off: diff %f" % sp.linalg.norm(self.u * self.beta - self.theta, ord=2))
        self.log("create beta form success")
Example 11
def _sampling_matrix(hessian, cutoff=0, temperature=1, step_scale=1):    
    # basically need SVD of hessian - singular values and eigenvectors
    # hessian = u * diag(singVals) * vh
    u, sing_vals, vh = scipy.linalg.svd(0.5 * hessian)

    # scroll through the singular values and find the ones whose inverses will
    # be huge and set them to zero also, load up the array of singular values 
    # that we store
    # cutoff = (1.0/_.singVals[0])*1.0e03
    # double cutoff = _.singVals[0]*1.0e-02
    cutoff_sing_val = cutoff * max(sing_vals)

    D = 1.0/scipy.maximum(sing_vals, cutoff_sing_val)

    ## now fill in the sampling matrix ("square root" of the Hessian)
    ## note that sqrt(D[i]) is taken here whereas Kevin took sqrt(D[j])
    ## this is because vh is the transpose of his PT -JJW
    samp_mat = scipy.transpose(vh) * scipy.sqrt(D)

    # Divide the sampling matrix by an additional factor such
    # that the expected quadratic increase in cost will be about 1.
    cutoff_vals = scipy.compress(sing_vals < cutoff_sing_val, sing_vals)
    if len(cutoff_vals):
        scale = scipy.sqrt(len(sing_vals) - len(cutoff_vals)
                           + sum(cutoff_vals)/cutoff_sing_val)
    else:
        scale = scipy.sqrt(len(sing_vals))

    samp_mat /= scale
    samp_mat *= step_scale
    samp_mat *= scipy.sqrt(temperature)

    return samp_mat
Example 12
def null(mat, eps=1e-12):
    '''returns null space of matrix mat'''
    u, s, vh = scipy.linalg.svd(mat)   # , full_matrices=False)
    padding = max(0, np.shape(mat)[1]-np.shape(s)[0])
    null_mask = np.concatenate(((s <= eps), np.ones((padding, ), dtype=bool)), axis=0)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 13
def null(A, eps=1e-9):
    u, s, vh = scipy.linalg.svd(A)
    padding = max(0, np.shape(A)[1] - np.shape(s)[0])
    null_mask = np.concatenate(((s <= eps), np.ones((padding, ), dtype=bool)),
                               axis=0)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
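The padding term in the last few variants exists because, for a wide matrix, svd returns only min(m, k) singular values while vh has k rows; without padding, the trailing rows of vh, which always lie in the nullspace, would be silently dropped. A small demonstration in plain NumPy:

import numpy as np

A = np.array([[1.0, 2.0, 3.0]])   # 1 x 3: s has one entry, vh is 3 x 3
u, s, vh = np.linalg.svd(A)
padding = max(0, A.shape[1] - s.shape[0])   # 2 rows of vh not covered by s
null_mask = np.concatenate((s <= 1e-9, np.ones(padding, dtype=bool)))
ns = vh[null_mask].T
print(ns.shape)                   # (3, 2): the full two-dimensional nullspace
print(np.allclose(A @ ns, 0.0))   # True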
Example 14
def nullOld(A, eps=1e-14):
    """ Find the null eigenvector x of matrix A, such that Ax=0"""
    # Taken with gratitude from http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix
    u, s, vh = la.svd(A)
    null_mask = (s <= eps)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 15
 def SetMeanMetaboliteConcentrationAndStdExcludingMissingValues(self):
     boFloats  = scipy.array(list(type(Entry)==float for Entry in self.DataArray))
     TmpDataArray = scipy.compress(boFloats,scipy.array(self.DataArray))
     TmpDataArray = scipy.array(TmpDataArray,dtype=float)
     self.MeanMetaboliteConcentrationExcludingMissingValues = TmpDataArray.mean()
     self.StdMetaboliteConcentrationExlcudingMissingValues  = TmpDataArray.std()
     return
Example 16
def nullspace(M, eps=1e-12):
    M = np.array(M)
    u, s, vh = sp.linalg.svd(M)
    padding = M.shape[1]-s.shape[0]
    null_mask = np.concatenate(((s <= eps), np.ones((padding,), dtype=bool)), axis=0)
    N = sp.compress(null_mask, vh, axis=0)
    return N
Example 17
def nullspace(A, atol=1e-9):
    '''Compute an approximate basis for the nullspace of A using the singular
    value decomposition of `A`.

    Parameters
    ----------
    'A' = ndarray;  A should be at most 2-D.  A 1-D array with length k will be
     treated  as a 2-D with shape (1, k)
    'atol' = float; The absolute tolerance for a zero singular value.  Singular
     values smaller than `atol` are considered to be zero.

    Returns
    -------
    'ns' = ndarray; If `A` is an array with shape (m, k), then `ns` will be an
    array with shape (k, n), where n is the estimated dimension of the
    nullspace of `A`.  The columns of `ns` are a basis for the
    nullspace; each element in numpy.dot(A, ns) will be approximately
    zero.
    '''

    # singular value decomposition
    u, s, vh = sp_la.svd(A)
    null_mask = (s <= atol)
    null_space = sp.compress(null_mask, vh, axis=0)
    return sp.transpose(null_space)
Example 18
def null(A, eps=1e-15):
    """Compute nullspace

    Computes a basis for the nullspace of a matrix.

    Args:
        A (np.array): Rectangular matrix
        eps (float): Singular value tolerance for nullspace detection

    Returns:
        np.array: Basis for matrix nullspace

    Examples:

        >>> from pybuck import null
        >>> import numpy as np
        >>> A = np.arange(9).reshape((3, -1))
        >>> N = null(A)

    """
    u, s, vh = svd(A)
    s = pad(s, (0, vh.shape[0] - len(s)), mode='constant')
    null_mask = (s <= eps)
    null_space = compress(null_mask, vh, axis=0)

    return t_mat(null_space)
Example 19
def dat_Truncate(Data, before=None, after=None, ind=0, direction="Column"):
    """a function that truncate a bidiemnsional array  or a list of array
        
        Data : a bidimensional numpy array or a list of monodimensional array 
        direction : string with value Column or Row,define wich kind of truncation
        index : index of the column or row  respect wich trucation is done
        before :all data with value smaller than it are cutted
        after :all data with value bigger than it are cutted
        ------
        if data is a list the direction keyworl will be ignored
        N.B.
        the index column must be in a growing order
        -------
        Returns 
        a truncate array if input was array, else a list
        
        """
    if before is None and after is None:
        raise ValueError('no limit defined for truncation')
    if before is not None and after is not None and before > after:
        raise ValueError(
            'before = %f bigger than after = %f, no values remain!' % (before, after))

    if direction not in ("Row", "Column"):
        raise ValueError('direction must be a string with value "Column" or "Row"')

    if isinstance(Data, list):
        sbefore = 0 if before is None else bisect.bisect_left(Data[ind], before)
        safter = len(Data[ind]) if after is None else bisect.bisect_right(Data[ind], after)
        Data = [item[sbefore:safter] for item in Data]
        return Data

    if len(Data.shape) == 1:
        lo = -scipy.inf if before is None else before
        hi = scipy.inf if after is None else after
        Data = scipy.compress((lo < Data) & (Data < hi), Data)  # axis=0
    else:
        if direction == "Row":
            Data = Data.transpose()
        sbefore = 0 if before is None else bisect.bisect_left(Data[:, ind], before)
        safter = Data.shape[0] if after is None else bisect.bisect_right(Data[:, ind], after)
        Data = Data[sbefore:safter, :]
        if direction == "Row":
            Data = Data.transpose()
    return Data
Example 20
def null(A, eps=1e-15):
  import scipy
  from scipy import matrix
  A = matrix(A)
  u, s, vh = scipy.linalg.svd(A)
  null_mask = (s <= eps)
  null_space = scipy.compress(null_mask, vh, axis=0)
  return scipy.transpose(null_space)
Example 21
def null(A, eps=1e-15):
  import scipy
  from scipy import matrix
  A = matrix(A)
  u, s, vh = scipy.linalg.svd(A)
  null_mask = (s <= eps)
  null_space = scipy.compress(null_mask, vh, axis=0)
  return scipy.transpose(null_space)
Example 22
def null_space(A, eps=1e-13):
    """
    Calculate null space of matrix A
    """
    _, s, vh = sc.linalg.svd(A)
    s = np.append(s, np.zeros(vh.shape[0] - s.shape[0]))
    null_mask = s <= eps
    return sc.transpose(sc.compress(null_mask, vh, axis=0))
Example 23
def null(H, eps=1e-12):
    from scipy import linalg
    u, s, vh = linalg.svd(H)
    padding = max(0, np.shape(H)[1] - np.shape(s)[0])
    null_mask = np.concatenate(((s <= eps), np.ones((padding, ), dtype=bool)),
                               axis=0)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 24
def null(A, eps=1e-15):
    """Returns the null-space of the matrix A
    Implementation from
    http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix
    """
    u, s, vh = linalg.svd(A)
    null_mask = (s < eps)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 25
def null(A, eps=1e-15):
    """Returns the null-space of the matrix A
    Implementation from
    http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix
    """
    u, s, vh = linalg.svd(A)
    null_mask = (s < eps)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 26
 def null(self, A, eps=1e-12):
     '''Compute a base of the null space of A.'''
     u, s, vh = np.linalg.svd(A)
     padding = max(0, np.shape(A)[1] - np.shape(s)[0])
     null_mask = np.concatenate(((s <= eps), np.ones(
         (padding, ), dtype=bool)),
                                axis=0)
     null_space = scipy.compress(null_mask, vh, axis=0)
     return scipy.transpose(null_space)
Example 27
def null(A, eps=1e-6):
    u, s, vh = linalg.svd(A)
    np.save('sing_val', arr=s)
    padding = max(0, np.shape(A)[1] - np.shape(s)[0])
    null_mask = np.concatenate(((s <= eps), np.ones((padding, ), dtype=bool)),
                               axis=0)
    print(null_mask)
    null_space = compress(null_mask, vh, axis=0)
    return transpose(null_space)
Example 28
File: cme.py Project: poneill/amic
def null(A, eps=1e-15):
    """
    Compute nullspace of A.  Thanks Robert Kern and Ryan Krauss:
    http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix
    """
    u, s, vh = la.svd(A)
    null_mask = (s <= eps)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 29
def main():
    parser = OptionParser(usage=usage)
    parser.add_option(
        "-e", "--epsilon", type=float, dest="eps", default=None, help="set drop-off tolerance [default: %default]"
    )
    parser.add_option("-o", metavar="figname", dest="figname", default=None, help="save the figure to figname")
    parser.add_option(
        "-t",
        "--transparent",
        action="store_true",
        dest="transparent",
        default=False,
        help="save the figure as transparent",
    )
    parser.add_option(
        "-n", "--no-show", action="store_true", dest="no_show", default=False, help="do not show the figure"
    )
    (options, args) = parser.parse_args()
    if len(args) < 1:
        print(usage)
        return
    filename = args[0]
    print filename + ":"

    fd = open(filename, "r")
    n_row, n_col = map(int, fd.readline().split())
    n_item = int(fd.readline())
    print(n_row, n_col, n_item)

    ij = nm.zeros((n_item, 2), nm.int32)
    val = nm.zeros((n_item,), nm.float64)
    for ii, row in enumerate(fd.readlines()):
        aux = row.split()
        ij[ii] = int(aux[0]), int(aux[1])
        val[ii] = float(aux[2])

    if options.eps is not None:
        print("using", options.eps)
        ij = nm.compress(nm.absolute(val) > options.eps, ij, 0)
        n_item = ij.shape[0]
    else:
        print("showing all")

    print(n_item)
    if n_item:
        plot(ij[:, 1] + 0.5, ij[:, 0] + 0.5, linestyle="None", marker=",", markersize=0.5, markeredgewidth=0.1)
    axis([-0.5, n_row + 0.5, -0.5, n_col + 0.5])
    axis("image")
    xlabel("%d x %d: %d nnz, %.2f\%% fill" % (n_row, n_col, n_item, 100.0 * n_item / float(n_row * n_col)))
    gca().set_ylim(gca().get_ylim()[::-1])

    if options.figname is not None:
        savefig(options.figname, transparent=options.transparent)

    if not options.no_show:
        show()
Example 30
def null(A, eps=1e-12, sparse=True):
    if sparse == True:
        X = scipy.sparse.csc_matrix(A)
        n=X.shape[1]
        u, s, vh = svds(X, n-1, which='SM')
    else:
        u, s, vh = scipy.linalg.svd(A)
    null_mask = (s <= eps)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 31
def edgeNumber_triangles_indexes(list_of_edges_numbers, RWGNumber_signedTriangles):
    """This function returns a 1-D array of the indexes of the triangles corresponding
    to a 1-D array of edges_numbers. This function is important for creating lists of triangles
    that will participate to the MoM, given a particular criterium concerning the edges."""
    indexes_of_triangles_tmp1 = take(RWGNumber_signedTriangles, list_of_edges_numbers, axis=0).flat
    indexes_of_triangles_tmp2 = sort(indexes_of_triangles_tmp1, kind='mergesort')
    indexes_of_triangles_to_take = ones(indexes_of_triangles_tmp2.shape[0], 'i')
    indexes_of_triangles_to_take[1:] = indexes_of_triangles_tmp2[1:] - indexes_of_triangles_tmp2[:-1]
    indexes_of_triangles = compress(indexes_of_triangles_to_take != 0, indexes_of_triangles_tmp2)
    return indexes_of_triangles.astype('i')
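The mergesort-plus-adjacent-difference idiom above deduplicates the triangle indexes; with modern NumPy the same result is np.unique (a hedged equivalent, not the project's own code):

import numpy as np

indexes_tmp = np.array([3, 1, 3, 2, 1, 7])
print(np.unique(indexes_tmp).astype('i'))   # [1 2 3 7], sorted and deduplicated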
Example 32
def calc_null_space3(A_spar,tjek=False):
    eps = 1e-12
    u, s, vh = scipy.sparse.linalg.svds(A_spar,k=min(A_spar.shape)-1,which='SM',tol=eps)
    N= scipy.sparse.csr_matrix(scipy.compress(s<=eps,vh,axis=0).T)
    if tjek :
        if A_spar.dot(N).max()>1e-3:
            logger.warning('Nullspace tolerance violated')
        else :
            logger.info('Nullspace is good')
    return N
Example 33
def get_intervals(traj):
    # We want to break up our integrals when events fire, so first we figure out
    #  when they fired by looking for duplicated times in the trajectory
    times = traj.get_times()
    eventIndices = scipy.compress(scipy.diff(times) == 0, 
                                  scipy.arange(len(times)))
    intervals = list(zip([0] + list(eventIndices + 1),
                         list(eventIndices + 1) + [len(times)]))

    return intervals
Example 34
def get_intervals(traj):
    # We want to break up our integrals when events fire, so first we figure out
    #  when they fired by looking for duplicated times in the trajectory
    times = traj.get_times()
    eventIndices = scipy.compress(scipy.diff(times) == 0, 
                                  scipy.arange(len(times)))
    intervals = list(zip([0] + list(eventIndices + 1),
                         list(eventIndices + 1) + [len(times)]))

    return intervals
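A usage sketch for get_intervals, with a plain array standing in for traj.get_times() (hypothetical input): a repeated time marks an event, and the index range is split there.

import numpy as np

times = np.array([0.0, 1.0, 2.0, 2.0, 3.0, 4.0])   # event fired at t = 2.0
event_indices = np.compress(np.diff(times) == 0, np.arange(len(times)))
intervals = list(zip([0] + list(event_indices + 1),
                     list(event_indices + 1) + [len(times)]))
print(intervals)   # [(0, 3), (3, 6)]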
Example 35
    def FilterSEs(self,
                  XmlObj=lxml.etree._ElementTree,
                  DCs=DataContainer.DataContainers,
                  ColumnTag=str,
                  boDryRun=False):

        FiltersTag = None
        if(boDryRun):
            FiltersTag = 'DryRunFilters'
        else:
            FiltersTag = 'Filters'

        FilterTags  = XmlObj.getroot().find('MtbGWAColumns').find(ColumnTag).find(FiltersTag).text.split(',')

        self.InitFilterReportDictDict(ColumnTag)

        FilterArray = None
        for Tag in FilterTags:
            Operator     = XmlObj.getroot().find('QCFilters').find(Tag).find('Operator').text
            CompareValue = XmlObj.getroot().find('QCFilters').find(Tag).find('Compare').text
            ValueType    = XmlObj.getroot().find('QCFilters').find(Tag).find('CompareType').text

            FFunction = FilterFunction.FilterFunction(OperatorString=Operator,
                                                      CompareString=CompareValue,
                                                      CompareType=ValueType)

            InitLength  = len(DCs.DataContainers[ColumnTag].GetDataArray())
            FinalLength = None
            FilterArray = FFunction.Run(DataArray=DCs.DataContainers[ColumnTag].GetDataArray())

            if(not boDryRun):
                for Key in DCs.DataContainers.keys():
                    DataArray = scipy.compress(FilterArray,
                                               DCs.DataContainers[Key].GetDataArray())
                    DCs.DataContainers[Key].ReplaceDataArray(DataArray)
                FinalLength = len(DCs.DataContainers[ColumnTag].GetDataArray())
            else:
                CounterDict = collections.defaultdict(int)
                for Entry in FilterArray:
                    CounterDict[Entry] += 1
                Difference  = CounterDict[False]
                FinalLength = InitLength-Difference


            self.SetFilterReportDictDict(ParentTag=ColumnTag,
                                         ChildTag=Tag,
                                         Value='Column Tag   = '+ColumnTag+'\n'+\
                                               'Filter Tag   = '+Tag+'\n'+\
                                               'Start Length = '+str(InitLength)+'\n'+\
                                               'Final Length = '+str(FinalLength)+'\n'+\
                                               'Difference   = '+str(InitLength-FinalLength))

        return DCs,\
               FilterTags
Example 36
def nulls2(a, rtol=1e-12):
    from sparsesvd import sparsesvd
    smat = scipy.sparse.csc_matrix(a)
    ut, s, vt = sparsesvd(smat, np.min(a.shape))
    print(vt.shape)
    padding = max(0,max(np.shape(a))-np.shape(s)[0])
    null_mask = np.concatenate(((s <= rtol), np.ones((padding,),dtype=bool)),axis=0)
    print(null_mask.shape)
    null_space = scipy.compress(null_mask, vt, axis=0)
    rank = (s > rtol*s[0]).sum()
    return s, rank, scipy.transpose(null_space)
Example 37
def null(A, eps=1e-6):
    u, s, vh = numpy.linalg.svd(A, full_matrices=1, compute_uv=1)
    null_rows = []
    rank = numpy.linalg.matrix_rank(A)
    for i in range(A.shape[1]):
        if i < rank:
            null_rows.append(False)
        else:
            null_rows.append(True)
    null_space = scipy.compress(null_rows, vh, axis=0)
    return null_space.T
Example 38
def CompleteBase(V, B, eps=1e-4):
    tbase = append(V, B, axis=1)
    p, l, u = lu(tbase)
    echelon = zeros(u.shape[1], int)
    
    for row in u:
        tmp = nonzero(abs(row) > eps)[0]
        if tmp.size:
            echelon[tmp[0]] = 1
  
    return compress(echelon, tbase, axis=1)
Example 39
def is_surface_closed(triangles_surfaces, edgeNumber_triangles):
    """this function determines if a surface is open or closed.
       it also provides relationships between surfaces (linked or not)"""
    S = max(triangles_surfaces)+1
    NUMBER_TRIANGLES_IN_SURFACE = zeros(S, 'i')
    for s in triangles_surfaces:
        NUMBER_TRIANGLES_IN_SURFACE[s] += 1
    connected_surfaces = {}
    # we now count the number of INNER edges for each surface.
    # the edges that are junctions receive a special treatment:
    # only if the edge has two triangles on the given surface, 
    # can it be counted as an inner edge, which will then be 
    # counted in NUMBER_EDGES_IN_SURFACE
    NUMBER_EDGES_IN_SURFACE = zeros(S, 'i')
    for edge_number, triangles_tmp in edgeNumber_triangles.items():
        surfaces_appeared_already = zeros(S, 'i')
        if len(triangles_tmp)>2: # we have a junction here
            for t in triangles_tmp:
                surface = triangles_surfaces[t]
                if surfaces_appeared_already[surface]==0:
                    surfaces_appeared_already[surface] = 1
                else:
                    NUMBER_EDGES_IN_SURFACE[surface] += 1
            surfaces_present = compress(surfaces_appeared_already>0, arange(S))
            if len(surfaces_present)==2:
                s0, s1 = min(surfaces_present), max(surfaces_present)
                if (s0, s1) in connected_surfaces:
                    connected_surfaces[(s0, s1)].append(edge_number)
                else:
                    connected_surfaces[(s0, s1)] = [edge_number]
            else:
                for index1 in arange(len(surfaces_present)):
                    for index2 in arange(index1+1, len(surfaces_present)):
                        s1 = min(surfaces_present[index1], surfaces_present[index2])
                        s2 = max(surfaces_present[index1], surfaces_present[index2])
                        if (s1, s2) in connected_surfaces:
                            connected_surfaces[(s1, s2)].append(edge_number)
                        else:
                            connected_surfaces[(s1, s2)] = [edge_number]
        else:
            surface = triangles_surfaces[triangles_tmp[0]]
            NUMBER_EDGES_IN_SURFACE[surface] += 1

    is_closed_surface = ( (NUMBER_EDGES_IN_SURFACE*2) == (NUMBER_TRIANGLES_IN_SURFACE*3) )
    # we now check for potential closed surfaces: surfaces which can be closed
    # and on which we can therefore apply the CFIE
    potential_closed_surfaces = {}
    for key, item in connected_surfaces.items():
        s0, s1 = key[0], key[1]
        numberEdges0, numberEdges1 = NUMBER_EDGES_IN_SURFACE[s0], NUMBER_EDGES_IN_SURFACE[s1]
        numberTriangles0, numberTriangles1 = NUMBER_TRIANGLES_IN_SURFACE[s0], NUMBER_TRIANGLES_IN_SURFACE[s1]
        if ( numberEdges0 + numberEdges1 + len(item) )*2 == 3*(numberTriangles0 + numberTriangles1):
            potential_closed_surfaces[key] = item
    return is_closed_surface * 1, connected_surfaces, potential_closed_surfaces
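The closed-surface test (NUMBER_EDGES_IN_SURFACE * 2) == (NUMBER_TRIANGLES_IN_SURFACE * 3) rests on a counting argument: on a closed triangulated surface every inner edge borders exactly two triangles and every triangle contributes three edges, so 2E = 3T. A minimal check with hypothetical counts:

# Tetrahedron (closed): 4 triangles, 6 edges
assert 2 * 6 == 3 * 4
# Single triangle (open): 1 triangle, 0 inner edges
assert 2 * 0 != 3 * 1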
Example 40
def null(A, eps=1e-6):
    u, s, vh = numpy.linalg.svd(A,full_matrices=1,compute_uv=1)
    null_rows = []
    rank = numpy.linalg.matrix_rank(A)
    for i in range(A.shape[1]):
        if i < rank:
            null_rows.append(False)
        else:
            null_rows.append(True)
    null_space = scipy.compress(null_rows, vh, axis=0)
    return null_space.T
Example 41
def compute_stationary_dist(Pt):
    eps = 1e-15
    u,s,vh = linalg.svd((np.eye(Pt.shape[0]) - Pt).T)
    null_mask = (s<eps)
    null_space = scipy.compress(null_mask, vh, axis = 0)
    #print 'nullspace:',null_space
    Pi = null_space[0]/null_space[0].sum()
    if (Pi<0).sum()>0 or null_space.shape[0]>1:
        print(Pt)
        print(null_space)
        print(Pi)
    assert (Pi<0).sum()==0
    return Pi
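A usage sketch on a two-state chain (hypothetical transition matrix; linalg is assumed to be scipy.linalg): the nullspace of (I - P)^T contains the stationary distribution, since (I - P)^T pi = 0 is equivalent to pi P = pi.

import numpy as np

Pt = np.array([[0.9, 0.1],
               [0.5, 0.5]])        # row-stochastic transition matrix
Pi = compute_stationary_dist(Pt)
print(Pi)                          # approximately [0.8333 0.1667]
print(np.allclose(Pi @ Pt, Pi))    # stationarity check: True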
Example 42
def null(A, eps=1e-10):
    """
    null-space of a Matrix or 2d-array
    """
    n, m = A.shape
    if n > m :
        return null(A.T, eps).T
        #return null(scipy.transpose(A), eps)
    u, s, vh = sc.linalg.svd(A)
    s=sc.append(s,sc.zeros(m))[0:m]
    null_mask = (s <= eps)
    null_space = sc.compress(null_mask, vh, axis=0)
    return null_space.T
Example 43
def nullspace(A, eps=1e-15):
    u, s, vh = sp.linalg.svd(A, full_matrices=1, compute_uv=1)
    # Pad so that we get the nullspace of a wide matrix.
    N = A.shape[1]
    K = s.shape[0]
    if K < N:
        # s has only K entries; pad with zeros so the mask covers all N rows of vh
        s2 = np.zeros(N)
        s2[0:K] = s
        s = s2
    null_mask = (s <= eps)
    null_space = sp.compress(null_mask, vh, axis=0)
    return sp.transpose(null_space)
Example 44
def root_inv(A, eps=1e-8):
    """Function:
    Get A^-1/2 based on SVD. Dimensions may be reduced.

    Author(s): Takashi Tsuchimochi
    """

    u, s, vh = np.linalg.svd(A, hermitian=True)
    mask = s >= eps
    red_u = sp.compress(mask, u, axis=1)
    # Diagonal matrix of s**-1/2
    sinv2 = np.diag([1 / np.sqrt(i) for i in s if i > eps])
    Sinv2 = red_u @ sinv2
    return Sinv2
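A quick verification sketch for root_inv (sp is assumed to be the old top-level scipy namespace; np.compress behaves identically): for a full-rank symmetric positive-definite A, the returned factor satisfies Sinv2 @ Sinv2.T ≈ inv(A).

import numpy as np

A = np.array([[2.0, 0.5],
              [0.5, 1.0]])
u, s, vh = np.linalg.svd(A, hermitian=True)
red_u = np.compress(s >= 1e-8, u, axis=1)
sinv2 = np.diag(1.0 / np.sqrt(s[s >= 1e-8]))
Sinv2 = red_u @ sinv2
print(np.allclose(Sinv2 @ Sinv2.T, np.linalg.inv(A)))   # True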
Example 45
def null(X,eps=1e-8):
    """ Define null space function for solving AP=0 """
    Solution=0
    u, s, vh = scipy.linalg.svd(X)     # Singular value decomposition
    null_mask = (np.absolute(s) <= eps)
    null_space = scipy.compress(null_mask, vh, axis=0)  # Find corresponding vectors
    n,m=np.shape(null_space)
    for i in range(0,n):           # Choose only solutions that can be probabilities
        positive=np.greater_equal(null_space[i,:],np.zeros(m))
        negative=np.less_equal(null_space[i,:],np.zeros(m))
        if (np.all(positive)==True or np.all(negative)==True):        # Ensure sum(Pi)=1
            Solution=np.absolute(null_space[i,:])
            Solution=Solution/np.sum(Solution)
    return scipy.transpose(Solution)
Example 46
def _sampling_matrix(hessian, cutoff=None, diag = None, temperature=1, step_scale=1):
    # basically need SVD of hessian - singular values and eigenvectors
    # hessian = u * diag(singVals) * vh
    #u, sing_vals, vh = scipy.linalg.svd(hessian)

    # scroll through the singular values and find the ones whose inverses will
    # be huge and set them to zero also, load up the array of singular values 
    # that we store
    # cutoff = (1.0/_.singVals[0])*1.0e03
    # double cutoff = _.singVals[0]*1.0e-02
    # when cutoff is set to zero it means that all values are included
    # cutoff*(sloppiest eigenvalue)
    
    if cutoff:
        u, sing_vals, vh = scipy.linalg.svd(hessian)
        cutoff_sing_val = cutoff * max(sing_vals)
        #when cutoff is set to zero it means that all values are included
        D = 1.0/scipy.maximum(sing_vals, cutoff_sing_val)
        samp_mat = scipy.transpose(vh)*scipy.sqrt(D)
    # instead of cutoff use another method, adding diagonal term to hessian
    elif diag is not None:
        u, sing_vals, vh = scipy.linalg.svd(hessian+diag)
        D = 1.0/sing_vals
        samp_mat = scipy.transpose(vh)*scipy.sqrt(D)
        cutoff_sing_val = diag[0,0]
    else: 
        u, sing_vals, vh = scipy.linalg.svd(hessian)
        D = 1.0/sing_vals
        samp_mat = scipy.transpose(vh)*scipy.sqrt(D)
        cutoff_sing_val = 0

    ## now fill in the sampling matrix ("square root" of the Hessian)
    ## note that sqrt(D[i]) is taken here whereas Kevin took sqrt(D[j])
    ## this is because vh is the transpose of his PT -JJW
    #samp_mat = scipy.transpose(vh) * scipy.sqrt(D)

    # Divide the sampling matrix by an additional factor such
    # that the expected quadratic increase in cost will be about 1.
    cutoff_vals = scipy.compress(sing_vals < cutoff_sing_val, sing_vals)
    if len(cutoff_vals):
        scale = scipy.sqrt(len(sing_vals) - len(cutoff_vals)
                           + sum(cutoff_vals)/cutoff_sing_val)
    else:
        scale = scipy.sqrt(len(sing_vals))

    samp_mat /= scale
    samp_mat *= step_scale
    samp_mat *= scipy.sqrt(temperature)

    return samp_mat
Example 47
    def get_U(self, eps=1e-5):

        from scipy import linalg, compress

        # get the null matrix N of M
        # such that U=[M;N] is orthogonal
        M = self.M.detach().cpu()
        A = torch.zeros(M.shape[1] - M.shape[0], M.shape[1])
        A = torch.cat([M, A])
        u, s, vh = linalg.svd(A.numpy())
        null_mask = (s <= eps)
        null_space = compress(null_mask, vh, axis=0)
        N = torch.tensor(null_space)
        return torch.cat([self.M, N.to(self.M.device)])
Example 48
def nullspace(A, myeps=1e-10):
    """The RumPath class needs the ability to compute the null-space of
    a small matrix. This is provided here. But we now also need scipy!

    This routine was copy-pasted from
    http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix
    How the h*** does numpy/scipy not have a null-space implemented?
    """
    u, s, vh = scipy.linalg.svd(A)
    padding = max(0, np.shape(A)[1] - np.shape(s)[0])
    null_mask = np.concatenate(((s <= myeps),
                                np.ones((padding,),dtype=bool)),
                               axis=0)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 49
def nullspace(A, myeps=1e-10):
    """The RumPath class needs the ability to compute the null-space of
    a small matrix. This is provided here. But we now also need scipy!

    This routine was copy-pasted from
    http://stackoverflow.com/questions/5889142/python-numpy-scipy-finding-the-null-space-of-a-matrix
    How the h*** does numpy/scipy not have a null-space implemented?
    """
    u, s, vh = scipy.linalg.svd(A)
    padding = max(0, np.shape(A)[1] - np.shape(s)[0])
    null_mask = np.concatenate(((s <= myeps), np.ones(
        (padding, ), dtype=bool)),
                               axis=0)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(null_space)
Example 50
def null(A, eps=1e-15):
    """Computes a basis for the nullspace of a matrix
    Usage
        N = null(A)
    Arguments
        A = rectangular matrix
    Keyword Arguments
        eps = singular value tolerance for nullspace detection
    Returns
        N = matrix of column vectors; basis for nullspace
    """
    u, s, vh = svd(A)
    s = pad(s,(0,vh.shape[0]-len(s)),mode='constant')
    null_mask = (s <= eps)
    null_space = compress(null_mask, vh, axis=0)
    return transpose(null_space)
Example 51
def normal2points(p1, eps=1e-13):
    '''
    Normal vector of the surface defined by a group of points.

    p1 : [#, 3] matrix, where # is the number of points
    '''
    p1 = np.hstack([p1, np.atleast_2d(np.array([1] * len(p1))).transpose()])
    u, s, vh = np.linalg.svd(p1)

    null_mask = (s <= eps)
    if sum(null_mask) == 0:
        print("no null space??", p1, s)
    null_space = scipy.compress(null_mask, vh, axis=0)
    norm = null_space[0, :3]
    norm = norm / np.sqrt(np.sum(norm**2))
    return norm
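A usage sketch for normal2points with four coplanar points (hypothetical data); the recovered unit normal should be parallel to the z axis:

import numpy as np

p1 = np.array([[0.0, 0.0, 1.0],
               [1.0, 0.0, 1.0],
               [0.0, 1.0, 1.0],
               [1.0, 1.0, 1.0]])   # all points lie in the plane z = 1
norm = normal2points(p1)
print(np.allclose(np.abs(norm), [0.0, 0.0, 1.0]))   # True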
Example 52
def find_nullspace(M):
    """

    Convenient function that finds the nullspace of a matrix M using SVD.

    Inputs:
    M -> matrix

    Outputs:
    null_space -> array of nullspace vectors

    """

    U, S, V = np.linalg.svd(M)
    eps = 1e-13  # tolerance threshold for zero singular values
    null_mask = (S <= eps)
    null_space = scipy.compress(null_mask, V, axis=0)

    return null_space
Example 53
    def FilterChrs(self,
                   XmlObj=lxml.etree._ElementTree,
                   DCs=DataContainer.DataContainers,
                   ColumnTag=str):

        FilterTags  = XmlObj.getroot().find('MtbGWAColumns').find(ColumnTag).find('Filters').text.split(',')

        self.InitFilterReportDictDict(ColumnTag)

        FilterArray = None
        for Tag in FilterTags:
            Operator      = XmlObj.getroot().find('QCFilters').find(Tag).find('Operator').text
            CompareValues = XmlObj.getroot().find('QCFilters').find(Tag).find('Compare').text.split(',')
            ValueType     = XmlObj.getroot().find('QCFilters').find(Tag).find('CompareType').text

            FilterArray = None
            for i in range(len(CompareValues)):
                CValue    = CompareValues[i]
                FFunction = FilterFunction.FilterFunction(OperatorString=Operator,
                                                          CompareString=CValue,
                                                          CompareType=ValueType)
                if(i==0):
                    FilterArray = FFunction.Run(DataArray=DCs.DataContainers[ColumnTag].GetDataArray())
                else:
                    FilterArray = (FilterArray | FFunction.Run(DataArray=DCs.DataContainers[ColumnTag].GetDataArray()))

            InitLength  = len(DCs.DataContainers[ColumnTag].GetDataArray())
            for Key in DCs.DataContainers.keys():
                DataArray = scipy.compress(FilterArray,
                                           DCs.DataContainers[Key].GetDataArray())
                DCs.DataContainers[Key].ReplaceDataArray(DataArray)
            FinalLength = len(DCs.DataContainers[ColumnTag].GetDataArray())

            self.SetFilterReportDictDict(ParentTag=ColumnTag,
                                         ChildTag=Tag,
                                         Value='Column Tag   = '+ColumnTag+'\n'+\
                                               'Filter Tag   = '+Tag+'\n'+\
                                               'Start Length = '+str(InitLength)+'\n'+\
                                               'Final Length = '+str(FinalLength)+'\n'+\
                                               'Difference   = '+str(InitLength-FinalLength))

        return DCs,\
               FilterTags
Example 54
def main():
    parser = OptionParser( usage = usage )
    parser.add_option( "-e", "--epsilon", type = float,
                       dest = "eps", default = None,
                       help = "set drop-off tolerance [default: %default]" )
    (options, args) = parser.parse_args()
    if len( args ) < 1:
        print(usage)
        return
    filename = args[0]
    print(filename + ':')

    fd = open( filename, "r" )
    n_row, n_col = map( int, fd.readline().split() )
    n_item = int( fd.readline() )
    print(n_row, n_col, n_item)

    ij = nm.zeros( (n_item,2), nm.int32 )
    val = nm.zeros( (n_item,), nm.float64 )
    for ii, row in enumerate( fd.readlines() ):
        aux = row.split()
        ij[ii] = int( aux[0] ), int( aux[1] )
        val[ii] = float( aux[2] )

    if options.eps is not None:
        print('using', options.eps)
        ij = nm.compress( nm.absolute( val ) > options.eps, ij, 0 )
        n_item = ij.shape[0]
    else:
        print('showing all')

    print(n_item)
    if n_item:
        plot( ij[:,1] + 0.5, ij[:,0] + 0.5, linestyle = 'None',
              marker = ',', markersize = 0.5, markeredgewidth = 0.1 )
    axis( [-0.5, n_row+0.5, -0.5, n_col+0.5] )
    axis( 'image' )
    xlabel( '%d x %d: %d nnz, %.2f\%% fill'
            % (n_row, n_col, n_item, 100. * n_item / float( n_row * n_col )) )
    gca().set_ylim( gca().get_ylim()[::-1] )
    show()
Example 55
    def evaluate_interpolated_traj(self,dv_id,time,subinterval=None,der=0) :
        """ Needs Trajectory.build_interpolated_traj() to be called first

        Arguments:
        dv_id        the name of the component of the trajectory you wish to 
                     evaluate
        time         a vector of times or a scalar
        subinterval  an optional argument specifying the time interval 
                     between events that the time argument lies (but given a 
                     valid time, it will be found automatically)
        der          the derivative of the spline function you want, the order
                     of the derivative will be constrained by the order of the 
                     interpolated spline
        Outputs:
        A single scalar value (if time input is a scalar)
        or
        (returned_times, interpolated_trajectory at those times) if times is a
        vector

        Note: It is necessary to have a returned_times argument too, in case 
              the times passed in happens to have a timepoint that corresponds 
              to an event time, which often has two trajectory values associated
              with it.
        """
        if scipy.isscalar(time) :
            time = scipy.asarray([time]) # if a scalar was passed in, convert to an array
        else :
            time = scipy.asarray(time)
        local_tcks = self.tcks
        sorted_intervals = scipy.sort(list(local_tcks.keys()), axis=0)

        if subinterval is not None : # confine things to just one interval
            if subinterval not in local_tcks.keys() :
                raise "Not a valid subinterval (not in Trajectory.tcks.keys())"
            else :
                sorted_intervals = [[subinterval[0],subinterval[1]]]
                interval_start_ind = 0
                interval_end_ind = 0
        else :
            # sorted_intervals ends up being a list of lists, each length 2, not tuples anymore
            for interval_ind, interval in enumerate(sorted_intervals) :
                start_time, end_time = interval[0],interval[1]
                if (time[0] >= start_time) :
                    interval_start_ind = interval_ind
                if (time[-1] <= end_time) :
                    interval_end_ind = interval_ind
                    break

        dv_y = []
        returned_times = []
        dv_ind = self.key_column.keyToIndex[dv_id]
        for interval in sorted_intervals[interval_start_ind:(interval_end_ind+1)] :
            currTimes = scipy.compress( scipy.logical_and((time>=interval[0]),(time<=interval[1])) , time )
            startslice, endslice = 0, None
            if len(currTimes) > 1 :
                if (currTimes[0]==currTimes[1]) :
                # skip the first time point because it's repeated
                    startslice = 1
                if (currTimes[-1]==currTimes[-2]) :
                # skip the last time point because it's repeated
                    endslice = -1
                dv_y.extend( scipy.interpolate.splev(currTimes[startslice:endslice],
                                local_tcks[(interval[0],interval[1])][dv_ind],der=der) )
                returned_times.extend(currTimes[startslice:endslice])
            elif len(currTimes) == 1: # explicitly check, because len(currTimes) could = 0
                dv_y.extend( [ scipy.interpolate.splev(currTimes, local_tcks[(interval[0],interval[1])][dv_ind],der=der) ])
                returned_times.extend(currTimes[startslice:endslice])

        if len(returned_times) == 1 :
            return dv_y[0]
        else :
            return returned_times,dv_y
Example 56
def null(A, eps=1e-10):
    u, s, vh = scipy.linalg.svd(A)
    null_mask = (s <= eps)
    null_space = scipy.compress(null_mask, vh, axis=0)
    return scipy.transpose(scipy.conj(null_space))
Example 57
        NGenes                    = len(Genes)
        DataDict[Trait]['NGenes'] = NGenes
        os.remove(DecomprFile)
        for Alpha in AlphaLvls:
            BH    = statsmodels.stats.multitest.multipletests(pvals=PVals,
                                                              alpha=Alpha,
                                                              method='fdr_bh',
                                                              returnsorted=False)
            DataDict[Trait]['Alpha_'+str(Alpha)]     = Alpha
            DataDict[Trait]['AlphaBonf_'+str(Alpha)] = BH[3]
            BHPVals  = scipy.array(['BHp_value_alpha='+str(Alpha)])
            BHPVals  = scipy.append(BHPVals,BH[1].astype(str))
            Data     = scipy.vstack((Data,BHPVals))
            BHAccept = scipy.array(['BHAccept_alpha='+str(Alpha)])
            BHAccept = scipy.append(BHAccept,BH[0].astype(str))
            DataDict[Trait]['GeneSetAtAlpha_'+str(Alpha)] = scipy.compress(condition=BH[0],
                                                                           a=Genes).tolist()
            Data     = scipy.vstack((Data,BHAccept))
        OutFile  = os.path.join('Data',os.path.basename(DecomprFile))
        scipy.save(file=OutFile,
                   arr=Data)
        os.system('lbzip2 -f '+OutFile)
    print(OutFile)
    fw = open('Data/DataDict.json','w')
    json.dump(obj=DataDict,
              fp=fw)
    fw.close()
    os.system('lbzip2 -f Data/DataDict.json')

    AllGenesFile = 'Data/UniqGenesOverAllTraits.npy'
    scipy.save(file=AllGenesFile,
               arr=AllGenes)
Example 58
    def PlotManhattan(self, xname=str, yname=str, Log=Logger):

        LogString = "**** Generating Manhattan plot ..."
        print(LogString)
        Log.Write(LogString + "\n")

        PylabParameters, Rectangle = self.PylabUpdateParams()
        pylab.rcParams.update(PylabParameters)
        PylabFigure = pylab.figure()
        PylabFigure.clf()
        PylabAxis = PylabFigure.add_axes(Rectangle)
        XMax = 0
        XTicks = []
        XTickLabels = []
        for i in range(len(self.List)):
            DCs = self.List[i]
            XName = ""
            YName = ""
            for Key in DCs.DataContainers.iterkeys():
                if re.search(xname, Key):
                    XName = Key
                if re.search(yname, Key):
                    YName = Key

            X = []
            Y = []
            for i in range(len(DCs.DataContainers[YName].GetDataArray())):
                YEntry = DCs.DataContainers[YName].GetDataArray()[i]
                XEntry = DCs.DataContainers[XName].GetDataArray()[i]
                if YEntry != "-1":
                    Y.append(float(YEntry))
                    X.append(int(XEntry))
                else:
                    Y.append(1.0)
                    X.append(int(XEntry))
            XTicks.append(0.5 * (min(X) + max(X)) + XMax)
            XTickLabels.append(r"${\rm " + DCs.Label + "}$")
            X = scipy.array(X) + XMax
            XMax = X.max() + self.OffsetBetweenChrs
            Y = -scipy.log10(scipy.array(Y))

            YInsign = Y < -scipy.log10(1.0e-6)
            YSign = Y > -scipy.log10(5.0e-8)
            YSugg = Y >= -scipy.log10(1.0e-6)
            YSugg *= Y <= -scipy.log10(5.0e-8)

            YY = scipy.compress(YInsign, Y)
            if len(YY) > 0:
                PylabAxis.scatter(x=scipy.compress(YInsign, X), y=YY, color=DCs.Color, s=0.5)
            YY = scipy.compress(YSugg, Y)
            if len(YY) > 0:
                PylabAxis.scatter(x=scipy.compress(YSugg, X), y=YY, color=DCs.Color, s=5.0)
            YY = scipy.compress(YSign, Y)
            if len(YY) > 0:
                PylabAxis.scatter(x=scipy.compress(YSign, X), y=YY, color=DCs.Color, s=10.0)

        XSign = scipy.array(PylabAxis.get_xlim())
        YSign = -scipy.log10(scipy.array([5.0e-8, 5.0e-8]))
        PylabAxis.plot(
            XSign, YSign, linestyle="--", color="grey", label=r"${\rm " + self.PhenotypeName + "}$", linewidth=1.25
        )
        XSugg = scipy.array(PylabAxis.get_xlim())
        YSugg = -scipy.log10(scipy.array([1.0e-6, 1.0e-6]))
        PylabAxis.plot(XSugg, YSugg, linestyle=":", color="grey", linewidth=1.25)
        PylabAxis.set_ylim([0.0, PylabAxis.get_ylim()[1]])
        #        PylabAxis.set_xlim([-5e7,PylabAxis.get_xlim()[1]])
        PylabAxis.set_xlim([0, XMax])
        Handles, Labels = PylabAxis.get_legend_handles_labels()
        PylabAxis.legend(Handles, Labels, fancybox=True, shadow=True, loc="best")
        PylabAxis.set_xlabel(r"$\rm position$")
        PylabAxis.spines["right"].set_visible(False)
        PylabAxis.spines["top"].set_visible(False)
        PylabAxis.xaxis.set_ticks_position("bottom")
        PylabAxis.yaxis.set_ticks_position("left")
        PylabAxis.xaxis.set_ticks(XTicks)
        PylabAxis.xaxis.set_ticklabels(XTickLabels)
        for Label in PylabAxis.xaxis.get_ticklabels():
            Label.set_rotation(90)
        PylabAxis.set_ylabel(r"$-{\rm log}_{10}(p-{\rm value})$")
        PylabFigure.savefig("Manhattan_" + self.PhenotypeName + ".png")
        PylabAxis.clear()
        pylab.close(PylabFigure)
        del PylabFigure
        del PylabAxis

        return