Example #1
 def opfMeshReg(self,md):
     grd = md.addin.getFullGrid()
     dx,dy = array(grd['dx']), array(grd['dy'])
     nx,ny,x0,y0 = grd['nx'],grd['ny'],grd['x0'],grd['y0']
     xv,yv = r_[x0,x0+cumsum(dx)],r_[y0,y0+cumsum(dy)]
     npt = (nx+1)*(ny+1)
     ic = 0
     l = []
     fc = []
     for j in range(ny):
         for i in range(nx):
             if j==0: l.append([j*(nx+1)+i,j*(nx+1)+i+1,ic,-3]) # bottom boundary
             if i<nx-1: l.append([j*(nx+1)+i+1,(j+1)*(nx+1)+i+1,ic,ic+1]) # right
             if i==nx-1: l.append([j*(nx+1)+i+1,(j+1)*(nx+1)+i+1,ic,-2]) # right boundary
             if j<ny-1: l.append([(j+1)*(nx+1)+i+1,(j+1)*(nx+1)+i,ic,ic+nx]) # top
             if j==ny-1: l.append([(j+1)*(nx+1)+i+1,(j+1)*(nx+1)+i,ic,-1]) # top boundary
             if i==0: l.append([(j+1)*(nx+1)+i,j*(nx+1)+i,ic,-4]) # left boundary
             fc.append([j*(nx+1)+i,j*(nx+1)+i+1,(j+1)*(nx+1)+i+1,(j+1)*(nx+1)+i,j*(nx+1)+i])
             ic += 1
     fc0 = array(l,ndmin=2)
     fcup = array(fc,ndmin=2)
     faces = fc0[fc0[:,3]>=0]
     bfaces = fc0[fc0[:,3]<0]
     ag = argsort(bfaces[:,3])
     bfaces = bfaces[ag[-1::-1]]
     xm,ym = meshgrid(xv,yv)
     points = c_[reshape(xm,(npt,1)),reshape(ym,(npt,1))]
     return points,faces,bfaces,fcup
Example #2
def precision_and_recall(actual, predicted, cls):
    c = (actual == cls)
    si = sp.argsort(-c.astype(int))  # members of cls first; unary minus on bool arrays is unsupported in modern NumPy
    tp = sp.cumsum(sp.single(predicted[si] == cls))
    fp = sp.cumsum(sp.single(predicted[si] != cls))
    rec = tp / sp.sum(predicted == cls)
    prec = tp / (fp + tp)
    return prec, rec
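A minimal usage sketch with invented toy labels (`sp` aliased to NumPy here, since modern SciPy no longer re-exports `argsort`/`cumsum`):

import numpy as sp  # stand-in for the legacy top-level scipy namespace

actual = sp.array([1, 0, 1, 1, 0, 1])
predicted = sp.array([1, 1, 1, 0, 0, 1])
prec, rec = precision_and_recall(actual, predicted, cls=1)
print(prec[-1], rec[-1])  # precision/recall after scanning all samples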
Example #3
def pca(dat, npca=None, verbose = False):
    if isinstance(dat, sp.ndarray):
        dat = pd.DataFrame(dat)
        names = []
        for i in range(dat.shape[1]):
            names.append("x"+str(i+1))
        dat.columns = names
    names = list(dat.columns)
    nr = dat.shape[0]
    nc = dat.shape[1]
    r = sp.corrcoef(dat, rowvar=False)
    heikin = dat.mean(axis=0)
    bunsan = dat.var(axis=0, ddof=1)
    sd = sp.sqrt(bunsan)
    eval, evec = linalg.eig(r)
    eval = sp.real(eval)
    rank = rankdata(eval, method="ordinal")
    rank = nc+1-rank
    eval2 = eval.copy()
    evec2 = evec.copy()
    for i in range(nc):
        j = sp.where(rank == i+1)[0][0]
        eval[i] = eval2[j]
        evec[:, i] = evec2[:, j]
    contr = eval/nc*100
    cum_contr = sp.cumsum(contr)
    fl = (sp.sqrt(eval)*evec)
    for i in range(nc):
        dat.iloc[:, i] = (dat.iloc[:, i] - heikin.iloc[i]) / sd.iloc[i]  # .ix was removed from pandas; use positional .iloc
    fs = sp.dot(dat, evec*sp.sqrt(nr/(nr-1)))
    if npca is None:
        npca = sp.sum(eval >= 1)
    eval = eval[0:npca]
    cont = eval/nc
    cumc = sp.cumsum(cont)
    fl = fl[:, 0:npca]
    rcum = sp.sum((fl ** 2), axis=1)
    if verbose:
        print("            ", end="")
        for j in range(npca):
            print("{0:>8s}".format("PC"+str(j+1)), end="")
        print("  Contribution")
        for i in range(nc):
            print("{0:>12s}".format(names[i]), end="")
            for j in range(npca):
                print(" {0:7.3f}".format(fl[i, j]), end="")
            print(" {0:7.3f}".format(rcum[i]))
        print("  Eigenvalue", end="")
        for j in range(npca):
            print(" {0:7.3f}".format(eval[j]), end="")
        print("\nContribution", end="")
        for j in range(npca):
            print(" {0:7.3f}".format(cont[j]), end="")
        print("\nCum.contrib.", end="")
        for j in range(npca):
            print(" {0:7.3f}".format(cumc[j]), end="")
        print()
    return {"r":r, "fl":fl, "eval":eval, "fs":fs[:, 0:npca]}
Example #4
def fastunwrap(thetaArray, discont = scipy.pi):
    # takes an array of theta values
    # returns the data in unwrapped form (unwrapping along axis 0)
    diff = scipy.zeros_like(thetaArray)
    diff[1:,:] = scipy.diff(thetaArray, axis = 0)
    upSteps = diff > discont
    downSteps = diff < -discont
    shift = scipy.cumsum(upSteps, axis = 0) - scipy.cumsum(downSteps, axis = 0)
    return thetaArray - 2.0*discont*shift
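A quick sanity check; `import numpy as scipy` stands in for the legacy SciPy namespace the function expects:

import numpy as scipy  # stand-in: modern SciPy removed these NumPy re-exports

theta = scipy.linspace(0, 6 * scipy.pi, 200).reshape(-1, 1)    # growing phase
wrapped = scipy.mod(theta + scipy.pi, 2 * scipy.pi) - scipy.pi  # wrap to [-pi, pi)
print(scipy.allclose(fastunwrap(wrapped), theta))               # True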
Example #5
def calc_slist(xx, yy=None, zz=None):
    if yy is None:
        yy = sp.zeros_like(xx)
    if zz is None:
        zz = sp.zeros_like(xx)
    slist = sp.zeros_like(xx, dtype=sp.float64)
    sp.cumsum((sp.diff(xx)**2 + sp.diff(yy)**2 + sp.diff(zz)**2)**(1 / 2),
              out=slist[1:])
    return slist
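For example, the cumulative arc length along a unit circle (`sp` aliased to NumPy as a stand-in for legacy SciPy):

import numpy as sp  # stand-in for the legacy top-level scipy namespace

t = sp.linspace(0, 2 * sp.pi, 1000)
s = calc_slist(sp.cos(t), sp.sin(t))  # cumulative arc length along the circle
print(s[-1])                          # close to 2*pi (~6.28)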
Example #6
def slidesum(a, n, m):
    from scipy import hstack, vstack, cumsum, zeros, r_, array
    #sliding window summation; averaging window size is [n,m]
    na, ma = a.shape
    # pad with a leading row and column of zeros so the cumulative sums align
    aa = vstack((zeros((1, ma + 1), dtype=float),
                 hstack((zeros((na, 1), dtype=float), a))))
    a1 = cumsum(aa, 0)
    a2 = a1[n:na + 1, :] - a1[0:na + 1 - n, :]
    a3 = cumsum(a2, 1)
    ss = a3[:, m:ma + 1] - a3[:, 0:ma + 1 - m]
    return ss
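A small check against a direct sum (this runs if the scipy import above resolves; on a modern SciPy, change it to `from numpy import ...`):

import numpy as np

a = np.arange(20.0).reshape(4, 5)
ss = slidesum(a, 2, 2)                 # all 2x2 sliding-window sums
print(ss.shape)                        # (3, 4)
print(ss[0, 0] == a[0:2, 0:2].sum())   # True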
Example #7
def evaluate_tails(ans, preds, tails, topk=1):
    total_matched = sp.zeros(topk, dtype=sp.uint64)
    t_total_matched = sp.zeros(topk, dtype=sp.uint64)
    r_total_matched = sp.zeros(topk, dtype=sp.uint64)
    recall = sp.zeros(topk, dtype=sp.float64)
    t_recall = sp.zeros(topk, dtype=sp.float64)
    r_recall = sp.zeros(topk, dtype=sp.float64)
    q = 0
    p = 0
    r = 0
    for i in trange(ans.shape[0]):
        truth = ans.indices[ans.indptr[i]:ans.indptr[i + 1]]
        tail_truth = get_in_tails(truth, tails)
        if not len(tail_truth):
            p += 1
            t_preds = preds.indices[preds.indptr[i]:preds.indptr[i + 1]][:topk]
            matched = sp.isin(t_preds, truth)
            cum_matched = sp.cumsum(matched, dtype=sp.uint64)
            total_matched[:len(cum_matched)] += cum_matched
            recall[:len(cum_matched)] += cum_matched / len(truth)
            if len(cum_matched) != 0:
                total_matched[len(cum_matched):] += cum_matched[-1]
                recall[len(cum_matched):] += cum_matched[-1] / len(truth)
                continue
        q += 1
        t_preds = preds.indices[preds.indptr[i]:preds.indptr[i + 1]][:topk]
        t_matched = sp.isin(t_preds, tail_truth)
        r_matched = sp.isin(t_preds, truth)
        t_cum_matched = sp.cumsum(t_matched, dtype=sp.uint64)
        r_cum_matched = sp.cumsum(r_matched, dtype=sp.uint64)
        t_total_matched[:len(t_cum_matched)] += t_cum_matched
        r_total_matched[:len(r_cum_matched)] += r_cum_matched
        t_recall[:len(t_cum_matched)] += t_cum_matched / len(tail_truth)
        r_recall[:len(r_cum_matched)] += r_cum_matched / len(truth)
        if len(t_cum_matched) != 0:
            t_total_matched[len(t_cum_matched):] += t_cum_matched[-1]
            t_recall[len(t_cum_matched):] += t_cum_matched[-1] / len(tail_truth)

        if len(r_cum_matched) != 0:
            r_total_matched[len(r_cum_matched):] += r_cum_matched[-1]
            r_recall[len(r_cum_matched):] += r_cum_matched[-1] / len(truth)

    t_prec = t_total_matched / q / sp.arange(1, topk + 1)
    t_recall = t_recall / q
    r_prec = r_total_matched / q / sp.arange(1, topk + 1)
    r_recall = r_recall / q
    prec = total_matched / p / sp.arange(1, topk + 1)
    recall = recall / p
    print('preds in tails:', q)
    print('preds in non-tails:', p)
    return (np.round(t_prec, 4), np.round(t_recall, 4), np.round(prec, 4),
            np.round(recall, 4), np.round(r_prec, 4), np.round(r_recall, 4))
Example #8
	def set_invcovariance(self,xmask,invertcovariance=[],scalecovariance=None):

		del self.params['xmask']
		xmaskall = scipy.concatenate(xmask)

		if self.scale_data_covariance is not None:
			self.logger.info('Scaling covariance by {:.4f}.'.format(scalecovariance))
			self.covariance *= scalecovariance
		self.stddev = scipy.diag(self.covariance[scipy.ix_(xmaskall,xmaskall)])

		error_message = 'The covariance matrix is ill-conditioned. You may want to try the option sliced.'
		if 'sliced' in self.invert_covariance:
			self.logger.info('Slicing covariance.')
			self.covariance = self.covariance[scipy.ix_(xmaskall,xmaskall)]
			error_message = 'The covariance matrix is ill-conditioned. You may want to try the option block.'

		self.covariance = self.covariance.astype(scipy.float64) #make sure we have enough precision

		if 'diagonal' in self.invert_covariance:
			self.logger.info('Inverting diagonal matrix/blocks.')
			def inv(A):
				return scipy.diag(1./scipy.diag(A))
		elif 'cholesky' in self.invert_covariance:
			self.logger.info('Inverting using Cholesky decomposition.')
			def inv(A):
				c = linalg.inv(linalg.cholesky(A)) #using Cholesky's decomposition
				return scipy.dot(c.T,c)
		else:
			self.logger.info('Inverting using linalg inversion.')
			def inv(A):
				return linalg.inv(A)

		if 'block' in self.invert_covariance:
			self.logger.info('Inverting by block.')
			if 'sliced' in self.invert_covariance: blocksize = scipy.cumsum([0] + list(map(scipy.sum,xmask)))
			else: blocksize = scipy.cumsum([0] + list(map(len,xmask)))
			blocks = [[self.covariance[i1:i2,j1:j2] for j1,j2 in zip(blocksize[:-1],blocksize[1:])] for i1,i2 in zip(blocksize[:-1],blocksize[1:])]
			self.invcovariance = utils.blockinv(blocks,inv=inv)
			error_message = 'The covariance matrix is ill-conditioned. You have to provide a better estimate.'
		else:
			self.invcovariance = inv(self.covariance)

		diff = self.covariance.dot(self.invcovariance)-scipy.eye(self.covariance.shape[0])
		diff = scipy.absolute(diff).max()
		self.logger.info('Inversion computed to absolute precision {:.4g}.'.format(diff))
		if diff > 1.: raise LinAlgError(error_message)

		if 'sliced' not in self.invert_covariance:
			self.logger.info('Slicing covariance.')
			self.invcovariance = self.invcovariance[scipy.ix_(xmaskall,xmaskall)]
Example #9
def linspace_weighed(a, b, n, points):
    """Positions 'n' points in range ['a', 'b'] such that space around
       points[:][0] has an additional weight points[:][1] and
       half-width points[:][2].

       The shape of the weight is ``(|x - x0|/w + 1)**(-2)``, so that if the
       range is infinite, w is indeed the half-width of the distribution.
       points[:][1] describes the relative weights of such peaks."""

    x = linspace(a, b, 5*n)
    density = _n.zeros([len(x)], _n.float_)

    for point in points:
        point = list(point)
        
        if point[0] < a:
            point[0] = a
        if point[0] > b:
            point[0] = b

        density_shape = 1 / (abs(x - point[0])/point[2] + 1)**2
        base_weight = scipy.integrate.trapz(density_shape, x)

        density += (point[1] / base_weight) * density_shape

    if len(points) == 0:
        density[:] = 1

    cumdensity = scipy.cumsum(density) - density[0]
    cumdensity /= cumdensity[-1]
    interpolant = scipy.interpolate.interp1d(cumdensity, x)

    y = linspace(0, 1, n)
    return interpolant(y)
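A hypothetical call, clustering points near x = 0 (requires the snippet's aliases: `_n` as numpy, a `linspace` import, and a SciPy old enough to provide scipy.cumsum and scipy.integrate.trapz):

import scipy.integrate, scipy.interpolate  # submodules the function touches

pts = linspace_weighed(-1.0, 1.0, 11, points=[(0.0, 1.0, 0.2)])
# pts is densest around 0 and sparsest near the ends of [-1, 1]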
Example #10
def weighted_median(points, weights):
        sorted_indices = sp.argsort(points)
        points = points[sorted_indices]
        weights = weights[sorted_indices]
        cs = sp.cumsum(weights)
        median = sp.interp(.5, cs - .5*weights, points)
        return median
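Note that the interpolation treats the weights as already normalized; a sketch with weights summing to 1 (`sp` aliased to NumPy):

import numpy as sp  # stand-in for the legacy top-level scipy namespace

points = sp.array([1.0, 2.0, 3.0, 10.0])
weights = sp.full(4, 0.25)               # equal weights, summing to 1
print(weighted_median(points, weights))  # 2.5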
Example #11
def make_sweepsound(A, fs, start_freq, end_freq, sec):
    """
    Make a sweep sound (swept sine).

    parameters
    ---------------------
    A = 1     # amplitude
    fs = 44100 # sampling frequency (Hz)
    start_freq = 20  # start frequency (Hz)
    end_freq = 20000 # end frequency (Hz)
    sec = 5   # duration in seconds

    return
    ---------------------
    ret: sweep-sine signal (numpy array float64)
    """
    
    freqs = linspace(start_freq, end_freq, num = int(round(fs * sec)))
    ### per-sample phase increment (angular frequency / fs)
    phazes_diff = 2. * mpi * freqs / fs
    ### phase: integrate the increments
    phazes = cumsum(phazes_diff)
    ### synthesize the sine wave
    ret = A * sin(phazes)

    return ret
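Generating a five-second 20 Hz to 20 kHz sweep (the bare names `linspace`, `cumsum`, `sin` and the `mpi` alias are assumed to come from numpy/math):

from numpy import linspace, cumsum, sin
from math import pi as mpi   # the `mpi` alias assumed by the function

sweep = make_sweepsound(1.0, 44100, 20, 20000, 5)
print(sweep.shape)           # (220500,) == 44100 samples/s * 5 s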
Example #12
def model_time_input(lambd, Tmax):
    t = [0]
    alpha = [np.random.uniform(0, 1) for i in range(1, int(lambd * Tmax))]  # int() so range() accepts the bound
    t.extend(-1. / lambd * np.log(alpha))
    time_input = list(filter(lambda x: x < Tmax, sp.cumsum(t)))
    mean_time_input = (np.sum(t) / len(t)) * 60.
    return time_input, mean_time_input
Example #13
def recombine(G,rates):
    """
    Performs recombination of genotype matrix G corresponding to given rates

    input:      G       Nx2 matrix of integers where each columns give
                        haplotypes for N markers
                rates   (N-1)x1 vector of floats with recombination rates

    Element rates[i] is the rate of recombination between markers i and i+1

    Example:
    #>>> from numpy import random, ones, zeros, column_stack
    >>> G = column_stack((zeros((50,1)),ones((50,1))))
    >>> rates = 0.2*ones((G.shape[0],1))
    >>> random.seed(seed=0)
    >>> recombine(G,rates).T
    array([[ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,
             0.,  0.,  1.,  0.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  0.,
             0.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  0.,  0.,  0.,  0.,
             0.,  0.,  0.,  0.,  0.,  1.,  1.,  1.,  1.,  0.,  0.],
           [ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.,
             1.,  1.,  0.,  1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,
             1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.,  1.,  1.,  1.,
             1.,  1.,  1.,  1.,  1.,  0.,  0.,  0.,  0.,  1.,  1.]])
   
    """
   
    #draw uniform numbers for all marker interval
    crossover = mod(cumsum(random.uniform(size=(G.shape[0],1))<rates),2)
   
    #generate recombined G
    recG = array([ [g[c],g[c-1]] for g,c in zip(G[1:G.shape[0],:],crossover) ])
    recG = vstack((G[0,:],recG))
    
    return recG
Example #14
def compute_rescaled_range(sig, win_len):
    """Compute rescaled range of a given time series at a given scale.

    Parameters
    ----------
    sig : 1d array
        Time series.
    win_len : int
        Window length for each rescaled range computation, in samples.

    Returns
    -------
    rs : float
        Average rescaled range over windows.
    """

    # Demean signal
    sig = sig - np.mean(sig)

    # Calculate cumulative sum of the signal & split the signal into segments
    segments = split_signal(sp.cumsum(sig), win_len).T

    # Calculate rescaled range as range divided by standard deviation (of non-cumulative signal)
    rs_win = np.ptp(segments, axis=0) / np.std(split_signal(sig, win_len).T,
                                               axis=0)

    # Take the mean across windows
    rs = np.mean(rs_win)

    return rs
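A sketch of a run, with a minimal stand-in for the external `split_signal` helper (assumed behavior: non-overlapping windows as rows; the snippet's np/sp aliases must also resolve):

import numpy as np
sp = np  # legacy scipy alias used by the snippet

def split_signal(sig, win_len):
    # assumed behavior: reshape into non-overlapping windows of win_len samples
    n_win = len(sig) // win_len
    return sig[:n_win * win_len].reshape(n_win, win_len)

sig = np.random.default_rng(0).normal(size=4096)
print(compute_rescaled_range(sig, 64))  # grows roughly like 64**0.5 for white noise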
Example #15
def compute_detrended_fluctuation(sig, win_len, deg=1):
    """Compute detrended fluctuation of a time series at the given window length.

    Parameters
    ----------
    sig : 1d array
        Time series.
    win_len : int
        Window length for each detrended fluctuation fit, in samples.
    deg : int, optional, default=1
        Polynomial degree for detrending.

    Returns
    -------
    det_fluc : float
        Measured detrended fluctuation, as the average error fits of the window.
    """

    # Calculate cumulative sum of the signal & split the signal into segments
    segments = split_signal(sp.cumsum(sig - np.mean(sig)), win_len).T

    # Calculate local trend, as the line of best fit within the time window
    _, fluc, _, _, _ = np.polyfit(np.arange(win_len),
                                  segments,
                                  deg=deg,
                                  full=True)

    # Convert to root-mean squared error, from squared error
    det_fluc = np.mean((fluc / win_len))**0.5

    return det_fluc
Example #16
def impz(b, a=1):
    """Plot step and impulse response of an FIR filter.

    b : float
        Forward terms of the FIR filter.
    a : float
        Feedback terms of the FIR filter. (Default value = 1)

    From http://mpastell.com/2010/01/18/fir-with-scipy/

    Returns
    -------
    None

    """
    l = len(b)
    impulse = np.repeat(0., l)
    impulse[0] = 1.
    x = np.arange(0, l)
    response = sp.lfilter(b, a, impulse)
    plt.subplot(211)
    plt.stem(x, response)
    plt.ylabel('Amplitude')
    plt.xlabel(r'n (samples)')
    plt.title(r'Impulse response')
    plt.subplot(212)
    step = sp.cumsum(response)
    plt.stem(x, step)
    plt.ylabel('Amplitude')
    plt.xlabel(r'n (samples)')
    plt.title(r'Step response')
    plt.subplots_adjust(hspace=0.5)
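For instance, plotting the responses of an 8-tap moving-average filter (assumes the snippet's aliases: `np` as numpy, `plt` as matplotlib.pyplot, and `sp` resolving to scipy.signal for lfilter):

import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sp

impz(np.ones(8) / 8)   # 8-tap moving average: its step response is a ramp
plt.show()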
Example #17
def order1_patterns(t, patterns, pips, nmag0, nmag1):
    # max pattern length?
    maxl = 0
    for i in range(len(patterns)):
        if (len(patterns[i]) > maxl):
            maxl = len(patterns[i])

    order1 = sp.zeros((len(t)))
    for i in range(len(t) - maxl):
        # init any of the patterns:
        for j in range(len(pips)):
            if (sp.rand() < pips[j]):
                # add pattern to order 1:
                curl = len(patterns[j])
                order1[i:i + curl] = order1[i:i + curl] + patterns[j]

    # 1st order noise:
    noise1 = (sp.rand(len(t)) - .5) * nmag1
    # 0th order noise:
    noise0 = (sp.rand(len(t)) - .5) * nmag0

    # add 1st order noise
    order1 = order1 + noise1
    # convert back to 0th order
    order0 = sp.cumsum(order1)
    order0 = order0 + noise0
    return order0
Example #18
def pct_sigma(array):
    """
    Get normal quantiles

    Parameters
    ----------
    array : array_like
        distribution of values

    Returns
    -------
    sigma : ndarray
      normal quantile
    pct : ndarray
      percentile
    y : ndarray
      value
    """
    qrank = lambda x: ((x - 0.3175) / (x.max() + 0.365))

    y = array.copy()
    y = y[~np.isnan(y)]
    y = np.sort(y)

    if y.size == 0:
        blank = np.zeros(y.shape)
        return blank, blank, blank

    n = sp.ones(len(y))
    cs = sp.cumsum(n)
    pct = qrank(cs)
    sigma = sps.norm.ppf(pct)

    return sigma, pct, y
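A usage sketch for a normal Q-Q plot (aliases assumed: `np` as numpy, `sp` as legacy SciPy/NumPy, `sps` as scipy.stats):

import numpy as np
import scipy.stats as sps
sp = np  # legacy scipy alias used by the snippet

sample = np.random.default_rng(0).normal(size=500)
sigma, pct, y = pct_sigma(sample)
# plotting y against sigma gives an approximately straight line for normal data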
Example #19
 def eliminatePercentileTails(self, mskDds, loPercentile=10.0, hiPercentile=90.0):
     """
     Trims lower and/or upper image histogram tails by replacing :samp:`mskDds`
     voxel values with :samp:`mskDds.mtype.maskValue()`. 
     """
     rootLogger.info("Eliminating percentile tails...")
     rootLogger.info("Calculating element frequencies...")
     elems, counts = elemfreq(mskDds)
     rootLogger.info("elems:\n%s" % (elems,))
     rootLogger.info("counts:\n%s" % (counts,))
     cumSumCounts = sp.cumsum(counts, dtype="float64")
     percentiles = 100.0*(cumSumCounts/float(cumSumCounts[-1]))
     percentileElems = elems[sp.where(sp.logical_and(percentiles > loPercentile, percentiles < hiPercentile))]
     loThresh = percentileElems[0]
     hiThresh = percentileElems[-1]
     rootLogger.info("Masking percentiles range (%s,%s) = (%s,%s)" % (loPercentile, hiPercentile, loThresh, hiThresh))
     mskDds.asarray()[...] = \
         sp.where(
             sp.logical_and(
                 sp.logical_and(mskDds.asarray() >= loThresh, mskDds.asarray() <= hiThresh),
                 mskDds.asarray() != mskDds.mtype.maskValue()
             ),
             mskDds.asarray(),
             mskDds.mtype.maskValue()
         )
     rootLogger.info("Done eliminating percentile tails.")
Example #20
 def apply_flow(self,flowrate):
     r'''
     Convert the invaded sequence into an invaded time for a given flow rate
     considering the volume of invaded pores and throats.
     
     Parameters
     ----------
     flowrate : float
         The flow rate of the injected fluid
         
     Returns
     -------
     Creates a throat array called 'invasion_time' in the Algorithm 
     dictionary
     
     '''
     P12 = self._net['throat.conns']  # List of throats conns
     a = self['throat.invasion_sequence']  # Invasion sequence
     b = sp.argsort(self['throat.invasion_sequence'])
     P12_inv = self['pore.invasion_sequence'][P12]  # Pore invasion sequence
     # Find if the connected pores were invaded with or before each throat
     P1_inv = P12_inv[:,0] == a
     P2_inv = P12_inv[:,1] == a
     c = sp.column_stack((P1_inv,P2_inv))  
     d = sp.sum(c,axis=1,dtype=bool)  # List of Pores invaded with each throat
     # Find volume of these pores
     P12_vol = sp.zeros((self.Nt,))
     P12_vol[d] = self._net['pore.volume'][P12[c]]
     # Add invaded throat volume to pore volume (if invaded)
     T_vol = P12_vol + self._net['throat.volume']
     # Cumulative sum on the sorted throats gives cumulated inject volume
     e = sp.cumsum(T_vol[b]/flowrate)
     t = sp.zeros((self.Nt,))
     t[b] = e  # Convert back to original order
     self._phase['throat.invasion_time'] = t
Example #21
def continuous_phase(phase, axis=0, center=False):
    """Add and subtract 2 pi such that the phase in the array is
       as continuous as possible, along first or given axis. Optionally,
       it also centers the phase data so that the average is smallest."""

    phase = _n.array(phase, copy=0)

    rowshape = list(phase.shape)
    
    if len(rowshape) > 0:
        rowshape[axis] = 1

        slip = _n.concatenate([ _n.zeros(rowshape),
                                scipy.diff(phase, axis=axis) ],
                              axis=axis)
        slip = _n.around(slip/(2*_n.pi))
        cumslip = scipy.cumsum(slip, axis=axis)

        phase = phase - 2*_n.pi*cumslip
    else:
        pass

    if center:
        offset = _n.around(scipy.average(phase, axis=axis)/(2*_n.pi))
        offset = _n.reshape(offset, rowshape)
        offset = _n.repeat(offset, cumslip.shape[axis], axis=axis)
        phase = phase - 2*_n.pi*offset
    
    return phase
Example #22
def _load_sigma_as_frame(filepath, start=None, end=None):
    '''
    Read data from Sigma icesat-II simulations: http://icesat.gsfc.nasa.gov/icesat2/data/sigma/sigma_data.php

    returns pandas dataframe
    '''
    raw = loadtxt(open(filepath, 'r'))
    raw = raw[start:end]
    columns = ['x', 'y', 'z', 'index', 'signal_flag']
    data_dict = {}
    for col_ind, col in enumerate(columns):
        data_dict[col] = raw[:, col_ind]

    frame = pandas.DataFrame(data_dict)

    # 2D model uses along track distance d
    xy = array(list(zip(frame['x'], frame['y'])))  # list() so array() sees the pairs on Python 3
    d = zeros(len(frame))
    d[1:] = cumsum([sqrt(norm(xy[i] - xy[i-1])) for i in range(1, len(frame))])
    frame['d'] = d

    # Assign id to points detected from same pulse
    frame['shot_id'] = gen_shot_ids(frame)
    frame[['shot_id', 'index', 'signal_flag']] = frame[['shot_id', 'index', 'signal_flag']].astype(int)
    frame.filepath = filepath

    return frame
Example #23
 def __init__(self,layers,gridOpts):
   ''' Initialize the grid using the given layers and grid options.
   '''
   segments = []
   qStart   =  scipy.inf
   qEnd     = -scipy.inf
   for layer in layers:
     if layer.isQuantum:
       d1 = dn = gridOpts.dzQuantum
       segments += [self.get_dz_segment(d1,dn,layer.thickness)]
       qStart = min(qStart,sum([len(seg) for seg in segments[:-1]]))
       qEnd   = max(qEnd,  sum([len(seg) for seg in segments]))
     elif gridOpts.useFixedGrid:
       d1 = dn = gridOpts.dz
       segments += [self.get_dz_segment(d1,dn,layer.thickness)]
     elif layer.thickness*gridOpts.dzCenterFraction > gridOpts.dzEdge:
       d1 = dn = gridOpts.dzEdge
       dc = gridOpts.dzCenterFraction*layer.thickness
       segments += [self.get_dz_segment(d1,dc,layer.thickness/2),
                    self.get_dz_segment(dc,dn,layer.thickness/2)]
     else:
       d1 = dn = gridOpts.dzEdge
       segments += [self.get_dz_segment(d1,dn,layer.thickness)]
   self.dz       = scipy.concatenate(segments)
   self.z        = scipy.concatenate(([0],scipy.cumsum(self.dz)))
   self.zr       = (self.z[:-1]+self.z[1:])/2
   self.znum     = len(self.z)
   self.rnum     = len(self.zr)
   self.gridOpts = gridOpts
   self.qIndex   = scipy.arange(qStart,qEnd+1)   # Wavefunction index
   self.qrIndex  = scipy.arange(qStart,qEnd)     # Quantum region index   
Example #24
def normalizeHistogram(data):
    histogram = scipy.ndimage.histogram(data.astype("f"), 0, 255, 256)
    cumulatedHistogram = scipy.cumsum(histogram)
    nch = cumulatedHistogram.astype("f")/len(data.flat)
    inch = (nch*255).astype("i")
    normalize = scipy.vectorize(lambda i: inch[i])
    return normalize(data)
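A sketch of histogram equalization on a synthetic 8-bit image (requires an older SciPy whose top level still re-exports cumsum/vectorize, plus scipy.ndimage):

import numpy, scipy, scipy.ndimage

rng = numpy.random.default_rng(0)
img = rng.normal(128, 20, size=(64, 64)).clip(0, 255).astype("uint8")
eq = normalizeHistogram(img)   # spreads the narrow histogram over 0..255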
Example #25
def numpy_resample(qs, xs, rands):
    results = np.empty_like(rands)  # one output slot per requested draw
    lookup = sp.cumsum(qs)
    for j, key in enumerate(rands):
        i = sp.argmax(lookup > key)
        results[j] = xs[i]
    return results
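An inverse-CDF resampling sketch (`sp` aliased to NumPy; weights invented):

import numpy as np
sp = np  # legacy scipy alias used by the snippet

qs = np.array([0.2, 0.5, 0.3])        # sampling weights (sum to 1)
xs = np.array([10.0, 20.0, 30.0])     # values to draw from
rands = np.random.default_rng(0).random(3)
print(numpy_resample(qs, xs, rands))  # draws from xs with probabilities qs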
Example #26
    def compute_accuracy(self):
        """Computes accuracy across the range in `self.date_range`.

        Returns: a pandas DataFrame with three columns corresponding to each
            kind of prediction method (PredPol, perfect prediction (god), and
            the baseline (naive_count)). The entries of each column are an array
            where the ith entry is the average accuracy over `self.date_range`
            when visiting i number of grid cells
        """
        accuracy = {
            method: sp.zeros((len(self.results), len(self.lambda_columns)))
            for method in ['predpol', 'god', 'naive_count']
        }
        naive_count = count_seen(self.pred_obj, self.pred_obj.train)['num_observed']

        for i, (lambda_col, actual_col) in self._iterator():
            actual_vals = self.results[actual_col].values
            accuracy['god'][:, i] = sp.sort(actual_vals)[::-1]

            sorted_idx = sp.argsort(self.results[lambda_col])[::-1]
            accuracy['predpol'][:, i] = actual_vals[sorted_idx]

            sorted_idx = sp.argsort(naive_count)[::-1]
            accuracy['naive_count'][:, i] = actual_vals[sorted_idx]

            naive_count += self.results[actual_col]

        # Compute CI and p-values here
        for k, v in accuracy.items():
            accuracy[k] = sp.sum(v, axis=1)
            accuracy[k] = sp.cumsum(accuracy[k] / sp.sum(accuracy[k]))
        return pd.DataFrame(accuracy)
Example #27
    def initialize(self, state, chain):
        params = {}
        for key in self.scan_range.keys():
            # Check for single range
            if len(self.scan_range[key]) == 2:
                params[key] = sp.rand() * (self.scan_range[key][1] - self.scan_range[key][0]) + self.scan_range[key][0]
            else:
                # calculate weights of sub_regions
                sub_size = sp.array([])
                # Determine weights of region
                for i in range(0, len(self.scan_range[key]), 2):
                    sub_size = sp.append(sub_size, self.scan_range[key][i + 1] - self.scan_range[key][i])
                    self.range_weight[key] = sub_size / float(sp.sum(sub_size))

                # sample region based on size
                i_sel = 2 * sp.searchsorted(sp.cumsum(self.range_weight[key]), sp.rand())
                # sample point
                params[key] = (
                    sp.rand() * (self.scan_range[key][i_sel + 1] - self.scan_range[key][i_sel])
                    + self.scan_range[key][i_sel]
                )

        # params=dict([(key,sp.rand()*(self.scan_range[key][1]-self.scan_range[key][0])+self.scan_range[key][0]) for key in self.scan_range.keys() if type(self.scan_range[key])==list])

        # Add constant parameters
        for key in self.constants.keys():
            params[key] = self.constants[key]

        for key in self.functions.keys():
            params[key] = self.functions[key](params)

        modelid = "%i%01i" % (self.rank, 0) + "%i" % chain.accepted

        return params, modelid
Example #28
    def __init__(self, trace):
        """
		Takes a list of coordinate tuples and computes metrics required for realizing a specific bubble linker path.
		Usable metrics are as follows.
		_trace:
			#array of x,y coordinates of one single _trace
		_ld:
			#distance between successive points, linked diff (ld), and the distance between all points as a matrix (d)
			#index 0 refers to distance between 0,1 in _trace
			#index -1 refers to distance between -2,-1 in _trace
		_cld:
			#cumulative distance between coordinates starting at 0,1
			#there is no index 0
			#index i refers to the distance traveled to get to index i+1 in _trace
		_ll:
			#length of the whole molecule in the coordinate system
		_d:
			#distance between every point and every other
		"""
        self._trace = scipy.array(trace)
        self._ld = scipy.array([
            scipy.spatial.distance.euclidean(i, j)
            for i, j in zip(self._trace[:-1], self._trace[1:])
        ])
        self._cld = scipy.concatenate(([0], scipy.cumsum(self._ld)))
        self._ll = scipy.sum(self._ld)
        self._d = scipy.spatial.distance.squareform(
            scipy.spatial.distance.pdist(self._trace, 'euclidean'))
        self._d = scipy.ma.masked_where(self._d == 0,
                                        self._d)  ##mask self distances
Example #29
    def moleculify(self, fr, fl, rr, rl, length):
        """
		takes a representation of a trace, region definitions and labels, and a length in base pairs
		of a molecule, and returns a Chromatin.Molecule version.
		"""
        #mean length
        if len(fl) != len(rl) or (sum((fl + rl) == 1) > 0):
            return (None)

        region_lengths = scipy.array([
            sum((self._cld[r1[1] - 2] - self._cld[r1[0]],
                 self._cld[r2[1] - 2] - self._cld[r2[0]])) / 2
            for r1, r2 in zip(fr, rr)
        ])
        exclusive_end_pts = scipy.ceil(length * scipy.ndarray.round(
            scipy.cumsum(region_lengths) / sum(region_lengths), decimals=3))

        inclusive_start_pts = scipy.concatenate(([0], exclusive_end_pts[:-1]))

        regions = scipy.array([
            (s, e) for s, e in zip(inclusive_start_pts, exclusive_end_pts)
        ])
        molecule = Chromatin.Molecule([
            Chromatin.Region(l, length - e, length - s, e - s)
            for (s, e), l in reversed(list(zip(regions, fl)))
        ])
        return (molecule)
Example #30
    def __init__(self, geneseq, *, seed=1, wt_latent=5,
                 norm_weights=((0.4, -0.7, 1.5), (0.6, -7, 3.5)),
                 stop_effect=-15, min_observed_enrichment=0.001):
        """See main class docstring for how to initialize."""
        self.wt_latent = wt_latent

        if not (0 <= min_observed_enrichment < 1):
            raise ValueError('not 0 <= `min_observed_enrichment` < 1')
        self.min_observed_enrichment = min_observed_enrichment

        # simulate muteffects from compound normal distribution
        self.muteffects = {}
        if seed is not None:
            random.seed(seed)
        weights, means, sds = zip(*norm_weights)
        cumweights = scipy.cumsum(weights)
        for icodon in range(len(geneseq) // 3):
            wt_aa = CODON_TO_AA[geneseq[3 * icodon: 3 * icodon + 3]]
            for mut_aa in AAS_WITHSTOP:
                if mut_aa != wt_aa:
                    if mut_aa == '*':
                        muteffect = stop_effect
                    else:
                        # choose Gaussian from compound normal
                        i = scipy.argmin(cumweights < random.random())
                        # draw mutational effect from chosen Gaussian
                        muteffect = random.gauss(means[i], sds[i])
                    self.muteffects[f"{wt_aa}{icodon + 1}{mut_aa}"] = muteffect
Example #31
    def blobs(shape, porosity, blobiness=8):
        """
        Generates an image containing amorphous blobs

        Parameters
        ----------
        shape : list
            The size of the image to generate in [Nx, Ny, Nz] where N is the
            number of voxels

        blobiness : scalar
            Controls the morphology of the image.  A higher number results in
            a larger number of smaller blobs.

        porosity : scalar
            The porosity of the final image.  This number is approximated by
            the method so the returned result may not have exactly the
            specified value.

        """
        if sp.size(shape) == 1:
            shape = sp.full((3, ), int(shape))
        [Nx, Ny, Nz] = shape
        sigma = sp.mean(shape)/(4*blobiness)
        mask = sp.rand(Nx, Ny, Nz)
        mask = spim.gaussian_filter(mask, sigma=sigma)
        hist = sp.histogram(mask, bins=1000)
        cdf = sp.cumsum(hist[0])/sp.size(mask)
        xN = sp.where(cdf >= porosity)[0][0]
        im = mask <= hist[1][xN]
        return im
Example #32
def align_chain_to_seq(sequence,chain,verbose=False):
	#Build Polypeptides from the chains
	polypeptides = build_polypeptides(chain)
	
	#Can't be broken out into another function, because we need seq_lens
	contiguous_seqs = [single_pp.get_sequence().tostring() for single_pp in polypeptides]
	ATOM_joined_seq = ''.join(contiguous_seqs)
	
	seq_lens = [0] + [len(single_pp) for single_pp in polypeptides]


	#Figuring all of this out took days...
	#I am so tired of dealing with mapping various numberings around
	#I wish Biopython, especially Bio.pairwise2 had better documentation
	breaks = set(S.cumsum(seq_lens) )#TODO : Tear hair out GYAAAAA
	
	nogaps = lambda x,y: -2000 -200*y #There really should not be inserts with respect to the database sequence.

	def specificgaps(x,y):
		if x in breaks:#very minor penalty for gaps at breaks in the PDB structure, consider using 0
			return (0 -y)
		else:
			return (-2000 -200*y)#strongly discourage gaps anywhere else.
	
	alignments = __PW.align.globalxc(sequence.seq.tostring(),ATOM_joined_seq,nogaps,specificgaps)
	
	if verbose:
		#some output?
		for a in alignments:
			__stderr.write( __PW.format_alignment(*a) )
			__stderr.write('\n')

	return alignments
Example #33
def calc_rmat(rho):
    L = len(rho)
    mat = SP.zeros([L,L])
    cr = SP.cumsum(rho)
    for l1 in range(L):
        for l2 in range(l1, L):
            mat[l1,l2] = 0.5*(1-SP.exp(-2*(cr[l2] - cr[l1])))
    return mat
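This applies Haldane's map function r = (1 - exp(-2d))/2 to cumulative rates; a small sketch (`SP` aliased to NumPy):

import numpy as SP  # stand-in for the legacy top-level scipy namespace

rho = SP.array([0.05, 0.10, 0.20])  # per-interval recombination rates
print(calc_rmat(rho))               # upper triangle holds pairwise recombination fractions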
Example #34
def calc_random_pattern_exponential(tau,tStart=0,tDuration=1000,tTotal=1000,channel=0,name=''):
    """Calculate a sequence of random state changes with intervals drawn from an
    exponential distribution.

    Parameters
    ----------
    tau: float
        the scale parameter of the exponential distribution (in ms)

    tStart: int
        the time point at which the random changes start (in ms)
        
    tDuration: int
        the total length of the time section in which random changes can occur
        (in ms)
        
    tTotal: int
        the total length of the pattern (in ms)
        
    channel: int
        the channel to switch
        
    name: str
        the name of the pattern
        
    Returns
    -------
    Pattern: RIOpattern
        The generated RIOpattern instance
        
    """
    
    # a little ugly but guarantees to run
    change_times = [0]
    while sp.cumsum(change_times)[-1] < tDuration:
        change_times.append(stats.distributions.expon.rvs(scale=tau))
    
    change_times = sp.cumsum(change_times[1:-1]).astype('int32') + tStart
    
    # to make sure it ends latest at tStart+tDuration
    if len(change_times) % 2 == 1:
        change_times = sp.concatenate((change_times,[tStart+tDuration]))
    
    state_vec = change_times2state_vec(change_times,tTotal)
    Pattern = RIOpattern(name=name, Pulses=States2RIOpulses(state_vec,channel), total_duration=tTotal)
    return Pattern, state_vec
Example #35
 def sorted_csr_from_coo(shape, row_idx, col_idx, val, only_topk=None):
     m = (sp.absolute(val).sum() + 1) * 3
     sorted_idx = sp.argsort(row_idx * m - val)
     row_idx[:] = row_idx[sorted_idx]
     col_idx[:] = col_idx[sorted_idx]
     val[:] = val[sorted_idx]
     indptr = sp.cumsum(sp.bincount(row_idx + 1, minlength=(shape[0] + 1)))
     if only_topk is not None and isinstance(only_topk, int):
         only_topk = max(min(1, only_topk), only_topk)
         selected_idx = (sp.arange(len(val)) - indptr[row_idx]) < only_topk
         row_idx = row_idx[selected_idx]
         col_idx = col_idx[selected_idx]
         val = val[selected_idx]
     indptr = sp.cumsum(sp.bincount(row_idx + 1, minlength=(shape[0] + 1)))
     return smat.csr_matrix((val, col_idx, indptr),
                            shape=shape,
                            dtype=val.dtype)
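A self-contained sketch, assuming the function is available at module level (`sp` aliased to NumPy, `smat` to scipy.sparse; toy triplets invented):

import numpy as np
import scipy.sparse as smat
sp = np  # legacy scipy alias used by the snippet

row = np.array([0, 0, 1, 1, 1])
col = np.array([2, 0, 1, 2, 0])
val = np.array([0.5, 0.9, 0.1, 0.8, 0.3])
X = sorted_csr_from_coo((2, 3), row, col, val, only_topk=2)
print(X.toarray())  # within each row, only the 2 largest values survive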
Example #36
def shapesToSections(shapes):
    sections = []
    sections.append(shapes['syn_wo'][0] * shapes['syn_wo'][1])
    sections.append(shapes['inter_ws'][0] * shapes['inter_ws'][1])
    sections.append(shapes['syn_wi'][0] * shapes['syn_wi'][1])
    sections.append(shapes['wi'][0] * shapes['wi'][1])
    sections.append(shapes['wo'][0] * shapes['wo'][1])
    return cumsum(sections)
Example #37
 def share_slices(counts):
     cumcounts = scipy.cumsum(counts)
     cedges = scipy.linspace(0, cumcounts[-1] + 1, ncuts + 1)
     cutnumber = scipy.digitize(cumcounts, cedges) - 1
     assert (cutnumber >= 0).all() and (cutnumber < ncuts).all()
     return [
         scipy.flatnonzero(cutnumber == icut) for icut in range(ncuts)
     ]
Example #38
def plotCumSumVariance(var=None,filename="cumsum.pdf"):
    pl.figure()
    pl.plot(sp.arange(var.shape[0]),sp.cumsum(var)*100)
    pl.xlabel("Principle Component")
    pl.ylabel("Cumulative Variance Explained in %")
    pl.grid(True)
    #Save file
    pl.savefig(filename)
Example #39
def calculateThreshold(image, coveragePercent):
    import scipy
    data = image.data
    histogram = scipy.histogram(data, len(scipy.unique(data)))
    cumsum = scipy.cumsum(histogram[0])
    targetValue = cumsum[-1] * coveragePercent
    index = scipy.argmin(scipy.absolute(cumsum - targetValue))
    threshold = histogram[1][index]
    return threshold * image.unit
Example #40
 def prandom_walk_from_here(self,length=10,edge_filter=None):
     import scipy
     if (length<=0):
         return [self]
     if (not edge_filter):
         edge_filter=lambda x,y:1
     sm = [edge_filter(e, length) * e.strength() for e in self.outedges()]  # a list, not a map object: it is consumed twice below
     rv=(random.random()*sum(sm))
     idx=sum((scipy.cumsum(sm)<rv).astype(int))
     return [self]+(self.outedges()[idx]).target().prandom_walk_from_here(length-1,edge_filter)
Example #41
def nonna_select_data(data, outlier_threshold, level='high'):
	"""
	This function returns the indices of the selected (good) data points after identifying the main outliers. It applies
	a cut on the data to remove exactly a fraction (1-outlier_threshold) of all data points.
	By default the cut is applied only at the higher end of the data values, but the 
	parameter level can be used to change this
	
	Input arguments:
	data              = vector containing all data points
	outlier_threshold = remove outliers until we are left with exactly this fraction of the
	                    original data
	level             = 'high|low|both' determines if the outliers are removed only from the
					    high values end, the low values end, or both ends.
					    
	Output:
	idx               = index of selected (good) data
	"""
	
	# histogram all the data values
	n,x = scipy.histogram(data, len(data)//10)  # integer bin count (Python 3 division)
	# compute the cumulative distribution and normalize
	nn = scipy.cumsum(n)
	nn = nn / float(max(nn))
	
	if level=='high':
		# select the value such that a fraction outlier_threshold of the data lies below it
		if outlier_threshold < 1:
			val = x[pylab.find(nn/float(max(nn)) >= outlier_threshold)[0]]
		else:
			val = max(data)
		# use that fraction of data only
		idx = data <= val 
	elif level=='low':
		# select the value such that a fraction outlier_threshold of the data lies above it
		if outlier_threshold < 1:
			val = x[pylab.find(nn/float(max(nn)) <= (1-outlier_threshold))[-1]]
		else:
			val = min(data)
		# use that fraction of data only
		idx = data >= val 
	elif level=='both':		
		# select the value such that a fraction outlier_threshold/2 of the data lies below it
		if outlier_threshold < 1:
			Hval = x[pylab.find(nn/float(max(nn)) >= 1-(1-outlier_threshold)/2)[0]]
		else:
			Hval = max(data)	
		# select the value such that a fraction outlier_threshold/2 of the data lies above it
		if outlier_threshold < 1:
			Lval = x[pylab.find(nn/float(max(nn)) <= (1-outlier_threshold)/2)[-1]]
		else:
			Lval = min(data)
		# use that fraction of data only
		idx = scipy.logical_and(data >= Lval, data <= Hval) 
	
	return idx
Example #42
def sma(series, window):
    ''' returns an unweighted mean of the previous "window" values (data points) for the time series "series".
    
        series is a list ordered from oldest to most recent
        window is an integer, representing the amount of past values used to calculate the mean
        
        returns a numeric value of the mean for the provided "window"
    '''
    series = array(series)
    # cumulative-sum trick: the sum over each window is the difference of two
    # cumulative sums; dividing by the window length gives the mean
    constant = cumsum(series)
    constant[window:] = constant[window:] - constant[:-window]
    return constant[window - 1:] / float(window)
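A quick check of the windowed mean (the bare names are assumed to come from numpy):

from numpy import array, cumsum

print(sma([1, 2, 3, 4, 5], 3))   # [2. 3. 4.]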
Example #43
def velocity_dof(domain, ax):
    # Calculate velocity dof numbers forr each cell
    rm = roll( domain, 1, axis=ax )
    type_3 = logical_and( domain, rm )
    type_2 = logical_or(  domain, rm )
    
    dof = cumsum( logical_not( logical_or( type_3, type_2 ) ) ).reshape( domain.shape ) - 1
    # Do logic to figure out type 2 and 3
    dof[type_2 == 1] = -2
    dof[type_3 == 1] = -3

    return dof.astype(int64)
Example #44
 def update(self):
        print "<update>",
        training_set= numpy.vsplit(self.model.sample(self.gen_samples), self.gen_samples )
        training_set+=self.new_examples
        self.model.train(numpy.vstack(training_set))
        self.examples=self.examples+self.new_examples
        exl=self.likelihood_of_examples()
        self.like_threshold=scipy.mean(exl)
        exc=(scipy.cumsum(exl)/(scipy.sum(exl)+1e-43))[:-1]
        self.examples=[ self.examples[(exc<random.random()).astype(int).sum()]  for i in range(self.max_examples) ]
        self.new_examples=[]
        print "</update>"
Example #45
def multinomial(u, pvals):
    """Draw from multinomial

    Parameters
    ----------------
    u : float
        Number in 0, 1 interval
    pvals : (k, ) ndarray
        Probability mass function of a discrete distribution

    Returns
    ----------------
    y : int

    """
    cdf = sp.cumsum(pvals)
    if u >= 1:
        y = (cdf >= 1).nonzero()[0].min()        
    else:
        y = (u <= cdf).nonzero()[0].min()  # reuse the cdf computed above
    return y
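Empirically, the draws follow pvals (`sp` aliased to NumPy):

import numpy as np
sp = np  # legacy scipy alias used by the snippet

pvals = np.array([0.2, 0.3, 0.5])
u = np.random.default_rng(0).random(10000)
draws = [multinomial(ui, pvals) for ui in u]
print(np.bincount(draws) / 10000)   # approaches [0.2, 0.3, 0.5]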
Example #46
def sample_softmax(X):
    """
    numpy.array -> numpy.array
    
    Returns an array with output[j, i] = 1 iif Xj = i is sampled
    """
    if len(np.shape(X)) == 1: X.shape = (1, len(X))
    num_points = np.shape(X)[0]
    output = np.zeros(np.shape(X))
    b = np.sum(np.cumsum(X.T,0) < rand(num_points), 0) - 1
    output[range(num_points),b] = 1
    return output
Example #47
def muz_bar(qt, F, mu, mol_I):
    t = qt[:,0]
    q = qt[:,1:5]
    w = qt[:,5:8]
    
    Q = q_to_rotmatrix(q)

    # transform the dipole to space fixed coords
    fsp = dot(Q, mu)

    n = arange(fsp.shape[0])+1.0
    fspav = cumsum(fsp,axis=0) / n[:,newaxis]
    return fspav[-1,2]  
Example #48
def gen_single_trial(interval_lengths, rates):
    """ Generates a single spike train with intervals of length
    `interval_lengths` and the firing rates given in `rates`.
    """
    boundaries = sp.ones(len(interval_lengths) + 1) * pq.s
    boundaries[1:] = [l.rescale(boundaries.units) for l in interval_lengths]
    rates = rates[sp.nonzero(boundaries[1:])]
    boundaries = boundaries[sp.nonzero(boundaries)]
    boundaries[0] = 0.0 * pq.s
    boundaries = sp.cumsum(boundaries)
    return stools.st_concatenate([stg.gen_homogeneous_poisson(
        rate, t_start=boundaries[i], t_stop=boundaries[i + 1])
        for i, rate in enumerate(rates)])
Example #49
def estim_d(E, threshold):
    """ The function estimates the intrinsic dimension by looking at the cumulative variance
    Input:
        E: the eigenvalue
        threshold: the percentage of the cumulative variance
    Output:
        d: the intrinsic dimension
    """
    if E.size == 1:
        d = 1
    else:
        d = sp.where(sp.cumsum(E) / sp.sum(E) > threshold)[0][0] + 1
    return d
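For example (`sp` aliased to NumPy; eigenvalues invented):

import numpy as np
sp = np  # legacy scipy alias used by the snippet

E = np.array([5.0, 3.0, 1.5, 0.3, 0.2])  # eigenvalues, descending
print(estim_d(E, 0.90))                  # 3: smallest d explaining > 90% of the variance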
Example #50
def cumulativeBestHand(johnTrial = False):
    """
    Basically the same as above, but instead of the difference from the
    mean, the integrated difference from the mean.
    """
    intDelX = {}
    delX = bestHand(johnTrial)

    intDelX = copy.deepcopy(delX)
    for key in sorted(delX.keys()):
        # cumsum already integrates the whole row at once; no per-sample loop needed
        for j in range(2):
            intDelX[key][j][0] = scipy.cumsum(delX[key][j][0])

    return intDelX