def analyzeSound(self):
        """ Highlight the first N peaks in the frequency diagram.
        """
        # reload the data
        data = self.data
        sample_freq = self.sample_freq
        from scipy.fftpack import fftfreq
        freq_vect = fftfreq(data.size) * sample_freq
        
        # find the local maxima
        y0 = abs(fft(data))
#        y1 = abs(fft(data[:, 1]))
        maxi0 = ((diff(sign(diff(y0))) < 0) & (y0[1:-1] > y0.max()/10.)).nonzero()[0] + 1 # local max
        # maxi1 = ((diff(sign(diff(y1))) < 0) & (y1[1:-1] > y1.max()/10.)).nonzero()[0] + 1 # local max
        
        # frequency plot
        ax = self.main_figure.figure.add_subplot(212)
        ax.plot(freq_vect[maxi0], y0[maxi0], "o")
        # ax.plot(freq_vect[maxi1], y1[maxi1], "o")
        
        # annotate peaks above a cutoff frequency
        fc = 100
        for point in maxi0[(freq_vect[maxi0] > fc).nonzero()][:self.ui.spinBox.value()]:
            plt.annotate("%.2f" % freq_vect[point], (freq_vect[point], y0[point]))
#        for point in maxi1[(freq_vect[maxi0] > fc).nonzero()][:self.ui.spinBox.value()]:
#            plt.annotate("%.2f" % freq_vect[point], (freq_vect[point], y1[point]))
        
        self.ui.main_figure.canvas.draw()
Example #2
        def smooth_gamma(gamma=flat_gamma, knots=knots, tau=smoothing**-2):
            # the following is to include a "noise floor" so that level value
            # zero prior does not exert undue influence on age pattern
            # smoothing
            gamma = gamma.clip(pl.log(pl.exp(gamma).mean()/10.), pl.inf)  # only include smoothing on values within 10x of mean

            return mc.normal_like(pl.sqrt(pl.sum(pl.diff(gamma)**2 / pl.diff(knots))), 0, tau)
Example #3
        def smooth_gamma(gamma=flat_gamma, knots=knots, tau=smoothing**-2):
            # the following is to include a "noise floor" so that level value
            # zero prior does not exert undue influence on age pattern
            # smoothing
            gamma = gamma.clip(
                pl.log(pl.exp(gamma).mean() / 10.),
                pl.inf)  # only include smoothing on values within 10x of mean

            return mc.normal_like(
                pl.sqrt(pl.sum(pl.diff(gamma)**2 / pl.diff(knots))), 0, tau)
Example #4
def _grad_theta(psi, phi_theta):
   """
   Compute the 1D gradient of a scalar field in the theta direction on a unit-
   radius spherical shell.  Assumes psi is a 2D array with theta changing along
   axis 1. We use central differences for interior points, one-sided differences
   for exterior points, and address simple periodic boundaries.
   """
   (phi, theta) = phi_theta  # unpacked tuple argument (was a 2to3 "xxx_todo_changeme" artifact)
   dphi = p.diff(phi,axis=0)
   dtheta = p.diff(theta,axis=1)


   # pre-allocate output grid
   dpsidtheta = p.zeros(theta.shape)
   
   
   # use weighted central differences to compute theta gradient on the interior
   dpsidtheta[:,1:-1] = (((p.diff(psi[:,:-1],axis=1) / dtheta[:,:-1]**2 +
                           p.diff(psi[:,1:],axis=1) / dtheta[:,1:]**2) /
                          (1/dtheta[:,:-1] + 1/dtheta[:,1:]) ) )

      
   # compute theta gradients at exterior points
   if p.mod(theta[0,0],2*p.pi) == p.mod(theta[0,-1],2*p.pi):
      # use weighted central differences to compute gradient if periodic boundary
      dpsidtheta[:,[0,-1]] = p.tile(((p.diff(psi[:,:2],axis=1) / dtheta[:,0]**2 +
                                      p.diff(psi[:,-2:],axis=1) / dtheta[:,-1]**2) /
                                     (1/dtheta[:,0] + 1/dtheta[:,-1]) ), (1,2) )
   else:
      # use one-sided difference to compute gradient if not a periodic boundary
      dpsidtheta[:,-1] = (p.diff(psi[:,-2:],axis=1).T / dtheta[:,-1])
      dpsidtheta[:,0] = (p.diff(psi[:,:2],axis=1).T / dtheta[:,0])
   
   return dpsidtheta
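A minimal usage sketch for the gradient above, assuming `p` is pylab (`import pylab as p`), as the body implies; with psi = cos(theta) the result should approximate -sin(theta):

import pylab as p

phi = p.linspace(0, 2 * p.pi, 36)
theta = p.linspace(0, p.pi, 19)
phi2d, theta2d = p.meshgrid(phi, theta, indexing='ij')  # theta varies along axis 1

psi = p.cos(theta2d)                    # analytic theta-gradient: -sin(theta)
g = _grad_theta(psi, (phi2d, theta2d))  # numerically close to -p.sin(theta2d)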
Example #5
def postprocessCpData(data,geo,newxcount):
    x = data[:,0]
    y = geo[:,1]
    Cp = data[:,1]
 
    n = data.shape[0]
 
    # compute finite difference of x to classify points as upper and lower airfoil surface
    dx = pl.diff(x)
    dy = pl.diff(y)
    L = pl.sqrt(dx**2+dy**2)
    Tx = dx/L
    Ty = dy/L
    Nx = -Ty
    Ny = +Tx
    T = pl.array((Tx,Ty))
    T = T.transpose()
    N = pl.array((Nx,Ny))
    N = N.transpose()
 
    midx = (x[0:n-1]+x[1:n])/2.0
    midy = (y[0:n-1]+y[1:n])/2.0
    midcp = (Cp[0:n-1]+Cp[1:n])/2.0
 
    Tnode = pl.zeros( (n,2), pl.float64)
    Nnode = pl.zeros( (n,2), pl.float64)
    for i in range(1,n-1):
        Tnode[i,:] = bisector( T[i-1,:], T[i,:] )
        Nnode[i,:] = bisector( N[i-1,:], N[i,:] )
    Tnode[0,:] = bisector( T[0,:], T[-1,:] )
    Tnode[-1,:] = bisector( T[0,:], T[-1,:] )
    # mirror the Tnode endpoint handling (the source reused the stale loop index i here)
    Nnode[0,:] = bisector( N[0,:], N[-1,:] )
    Nnode[-1,:] = bisector( N[0,:], N[-1,:] )
 
    # determine (safe) limits of x for interpolation
    xmin = min( min(x[dx<0]),min(x[dx>=0]) )
    xmax = max( max(x[dx<0]),max(x[dx>=0]) )
 
    # re-compute lower and upper Cp at new abscissae
    if ChebyshevSpacing:  # module-level flag assumed by the source snippet
        xnew = pl.linspace(pl.pi, 0, newxcount)
        xnew = (pl.cos(xnew)+1)/2.0
    else:
        xnew = pl.linspace(xmin, xmax, newxcount)
 
    newCpUpper = pl.interp(xnew, pl.flipud(x[dx<0]), pl.flipud(Cp[dx<0]))     
    newCpLower = pl.interp(xnew, x[dx>=0], Cp[dx>=0])
 
    return (x,y,Cp,L,T,N,midx,midy,midcp,Tnode,Nnode,xnew,newCpUpper,newCpLower)
Example #6
def remove_discontinuity(value, xgap=10, ygap=200):
    """
    Remove discontinuity (sudden jump) in a series of values.
    Written by Denis, developed for LLC Fringe Counts data.
    value : list or numpy.array
    xgap  : "width" of index of the list/array to adjust steps
    ygap  : threshold value to detect discontinuity
    """
    difflist = pl.diff(value)
    discont_index = pl.find(abs(difflist) > ygap)

    if len(discont_index) == 0:
        return value
    else:
        discont_index = pl.append(discont_index, len(difflist))

    # find indice at discontinuities
    discont = {'start': [], 'end': []}
    qstart = discont_index[0]
    for i in range(len(discont_index)-1):
        if discont_index[i+1]-discont_index[i] > xgap:
            qend = discont_index[i]
            discont['start'].append(qstart-xgap)
            discont['end'].append(qend+xgap)
            qstart = discont_index[i+1]

    # add offsets at discontinuities
    result = pl.array(value)
    for i in range(len(discont['end'])):
        result[0:discont['start'][i]] += \
            result[discont['end'][i]] - result[discont['start'][i]]

    #remove the median
    result=result-pl.median(result)
    return result
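A minimal usage sketch; note that `pl.find` was removed from recent matplotlib releases, so the sketch shims it with `flatnonzero` before calling the function:

import pylab as pl
if not hasattr(pl, 'find'):            # pl.find is gone in modern matplotlib
    pl.find = pl.flatnonzero

t = pl.arange(1000, dtype=float)
value = 0.01 * t
value[400:] += 500.0                   # artificial jump, larger than ygap=200
fixed = remove_discontinuity(value, xgap=10, ygap=200)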
Example #7
def beat_track(x, feature=LogFrequencySpectrum, **kwargs):
    """
    Scheirer beat tracker. Use output of comb filter bank on filterbank sub-bands
                           to estimate tempo, and comb filter state to track beats.
    inputs:
       x        - the audio signal or filename to analyze
       feature  - the feature class to use [LogFrequencySpectrum]
       **kwargs - parameters to the feature extractor [nbpo=1, nhop=441]
    outputs:
       z      - per-tempo comb filter summed outputs
       tempos - the range of tempos in z
       D      - the differentiated half-wave rectified octave-band filterbank outputs
    """
    kwargs.setdefault('nhop',441)
    kwargs.setdefault('nbpo',1)
    F = feature(x, **kwargs)
    frame_rate = F.sample_rate / float(F.nhop)
    D = diff(F.X,axis=1)
    D[where(D<0)] = 0
    tempos = range(40,200,4)    
    z = zeros((len(tempos), D.shape[0]))    
    for i, bpm in enumerate(tempos): # loop over tempos to test
        t = int(round(frame_rate * 60. / bpm)) # num frames per beat
        alpha = 0.5**(2.0/t)
        b = [1 - alpha]
        a = zeros(t)
        a[0] = 1.0
        a[-1] = alpha
        z[i,:] = lfilter(b, a, D).sum(1) # filter and sum sub-band onsets    
    return z,tempos,D
Example #8
 def deriv_sign_rate(f=rate,
                     age_indices=age_indices,
                     tau=1.e14,
                     deriv=deriv,
                     sign=sign):
     df = pl.diff(f[age_indices], deriv)
     return mc.normal_like(pl.absolute(df) * (sign * df < 0), 0., tau)
Example #9
def visualize_steps(mod, fname='mod.avi', description_str=''):
    times = list(pl.arange(0, 30, .2)) + range(30, 200) + range(200, 1500, 10)
    times += range(1500, 1700) + range(1700, 3000, 10)
    times += range(3000, 3200) + range(3200, len(mod.X.trace()), 10)
    assert pl.all(
        pl.diff(times) >= 0.
    ), 'movies where time is not increasing are confusing and probably unintentional'
    try:
        print 'generating %d images' % len(times)
        for i, t in enumerate(times):
            if i % 100 == 99:
                print '%d of %d (t=%.2f)' % (i, len(times), t)
            sys.stdout.flush()
            visualize_single_step(mod, int(t), t - int(t), description_str)
            pl.savefig('mod%06d.png' % i)
    except KeyboardInterrupt:
        pass

    import subprocess
    subprocess.call(
        'mencoder mf://mod*.png -mf w=800:h=600 -ovc x264 -of avi -o %s' %
        fname,
        shell=True)
    subprocess.call('mplayer -loop 1 %s' % fname, shell=True)
    subprocess.call('rm mod*.png', shell=True)
Example #10
def beat_track(x, feature=LogFrequencySpectrum, **kwargs):
    """
    Scheirer beat tracker. Use output of comb filter bank on filterbank
                           sub-bands to estimate tempo, and comb filter state
                           to track beats.
    inputs:
       x        - the audio signal or filename to analyze
       feature  - the feature class to use [LogFrequencySpectrum]
       **kwargs - parameters to the feature extractor [nbpo=1, nhop=441]
    outputs:
       z      - per-tempo comb filter summed outputs
       tempos - the range of tempos in z
       D      - the differentiated half-wave rectified octave-band filterbank
                outputs
    """
    kwargs.setdefault('nhop', 441)
    kwargs.setdefault('nbpo', 1)
    F = feature(x, **kwargs)
    frame_rate = F.sample_rate / float(F.nhop)
    D = diff(F.X, axis=1)
    D[where(D < 0)] = 0
    tempos = range(40, 200, 4)
    z = zeros((len(tempos), D.shape[0]))
    for i, bpm in enumerate(tempos):  # loop over tempos to test
        t = int(round(frame_rate * 60. / bpm))  # num frames per beat
        alpha = 0.5**(2.0/t)
        b = [1 - alpha]
        a = zeros(t)
        a[0] = 1.0
        a[-1] = alpha
        z[i, :] = lfilter(b, a, D).sum(1)  # filter and sum sub-band onsets
    return z, tempos, D
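A hypothetical usage sketch: `LogFrequencySpectrum` appears to come from the Bregman audio features toolkit, so the import path and file name below are assumptions and the sketch is left commented out:

# from bregman.suite import LogFrequencySpectrum   # assumed source of the feature class
# z, tempos, D = beat_track('drum_loop.wav', nbpo=1, nhop=441)
# best_tempo = tempos[z.sum(1).argmax()]           # tempo whose comb filter resonates most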
Example #11
 def set_pdf(self, x, p, Nrl = 1000):
   """Generate the lookup tables. 
   x is the value of the random variate
   pdf is its probability density
   cdf is the cumulative pdf
   inversecdf is the inverse look up table
   
   """
   
   self.x = x
   self.pdf = p/p.sum() #normalize it
   self.cdf = self.pdf.cumsum()
   self.inversecdfbins = Nrl
   self.Nrl = Nrl
   y = pylab.arange(Nrl)/float(Nrl)
   delta = 1.0/Nrl
   self.inversecdf = pylab.zeros(Nrl)    
   self.inversecdf[0] = self.x[0]
   cdf_idx = 0
   for n in xrange(1,self.inversecdfbins):
     while self.cdf[cdf_idx] < y[n] and cdf_idx < Nrl:
       cdf_idx += 1
     self.inversecdf[n] = self.x[cdf_idx-1] + (self.x[cdf_idx] - self.x[cdf_idx-1]) * (y[n] - self.cdf[cdf_idx-1])/(self.cdf[cdf_idx] - self.cdf[cdf_idx-1]) 
     if cdf_idx >= Nrl:
       break
   self.delta_inversecdf = pylab.concatenate((pylab.diff(self.inversecdf), [0]))
Example #12
 def _phase_map(self):
     self.dphi = (2*P.pi * self.nhop * P.arange(self.nfft/2+1)) / self.nfft
     A = P.diff(P.angle(self.STFT),1) # Complete Phase Map
     U = P.c_[P.angle(self.STFT[:,0]), A - P.matrix(self.dphi).T ]
     U = U - P.np.round(U/(2*P.pi))*2*P.pi
     self.dPhi = U
     return U
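The line `U = U - P.np.round(U/(2*P.pi))*2*P.pi` wraps each phase deviation onto its principal value in [-pi, pi]; a standalone sketch of that step:

import numpy as np

def princarg(u):
    """Wrap angles (radians) onto the principal interval [-pi, pi]."""
    return u - np.round(u / (2 * np.pi)) * 2 * np.pi

print(princarg(3 * np.pi))   # -> -pi, the equivalent angle to 3*pi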
Example #13
File: icsd.py  Project: Junji110/iCSD
 def calc_f_matrix(self):
     '''Calculate F-matrix for step iCSD method'''
     el_len = self.coord_electrode.size
     h_val = abs(pl.diff(self.coord_electrode)[0])
     self.f_matrix = pl.zeros((el_len, el_len))
     for j in xrange(el_len):
         for i in xrange(el_len):
             if i != 0:
                 lower_int = self.coord_electrode[i] - \
                     (self.coord_electrode[i] - \
                      self.coord_electrode[i - 1]) / 2
             else:
                 lower_int = pl.array([0, self.coord_electrode[i] - \
                                       h_val/2]).max()
             if i != el_len-1:
                 upper_int = self.coord_electrode[i] + \
                     (self.coord_electrode[i + 1] - \
                      self.coord_electrode[i]) / 2
             else:
                 upper_int = self.coord_electrode[i] + h_val / 2
             
             self.f_matrix[j, i] = si.quad(self.f_cylinder, a=lower_int, \
                         b=upper_int, args=(self.coord_electrode[j]), \
                         epsabs=self.tol)[0] + \
                 (self.cond - self.cond_top) / (self.cond + self.cond_top) *\
                 si.quad(self.f_cylinder, a=lower_int, b=upper_int, \
                         args=(-self.coord_electrode[j]), \
                         epsabs=self.tol)[0]
Example #14
 def _pvoc2(self, X_hat, Phi_hat=None, R=None):
     """
     ::
       alternate (batch) implementation of phase vocoder - time-stretch
       inputs:
         X_hat - estimate of signal magnitude
         [Phi_hat] - estimate of signal phase
         [R] - resynthesis hop ratio
       output:
         updates self.X_hat with modified complex spectrum
     """
     N, W, H = self.nfft, self.wfft, self.nhop
     R = 1.0 if R is None else R
     dphi = P.atleast_2d((2*P.pi * H * P.arange(N/2+1)) / N).T
     print "Phase Vocoder Resynthesis...", N, W, H, R
     A = P.angle(self.STFT) if Phi_hat is None else Phi_hat
     U = P.diff(A,1) - dphi
     U = U - P.np.round(U/(2*P.pi))*2*P.pi
      t = P.arange(0,n_cols,R) # NOTE: n_cols is undefined in this snippet (likely self.STFT.shape[1])
     tf = t - P.floor(t)
     phs = P.c_[A[:,0], U] 
     phs += U[:,idx[1]] + dphi # Problem, what is idx ?
      Xh = (1-tf)*Xh[:-1] + tf*Xh[1:] # NOTE: Xh is read before assignment in the source (likely X_hat)
     Xh *= P.exp( 1j * phs)
     self.X_hat = Xh
Example #15
 def _pvoc2(self, X_hat, Phi_hat=None, R=None):
     """
     ::
       alternate (batch) implementation of phase vocoder - time-stretch
       inputs:
         X_hat - estimate of signal magnitude
         [Phi_hat] - estimate of signal phase
         [R] - resynthesis hop ratio
       output:
         updates self.X_hat with modified complex spectrum
     """
     N, W, H = self.nfft, self.wfft, self.nhop
     R = 1.0 if R is None else R
     dphi = P.atleast_2d((2 * P.pi * H * P.arange(N / 2 + 1)) / N).T
     print("Phase Vocoder Resynthesis...", N, W, H, R)
     A = P.angle(self.STFT) if Phi_hat is None else Phi_hat
     U = P.diff(A, 1) - dphi
     U = U - P.np.round(U / (2 * P.pi)) * 2 * P.pi
      t = P.arange(0, n_cols, R)  # NOTE: n_cols is undefined in this snippet (likely self.STFT.shape[1])
     tf = t - P.floor(t)
     phs = P.c_[A[:, 0], U]
     phs += U[:, idx[1]] + dphi  # Problem, what is idx ?
      Xh = (1 - tf) * Xh[:-1] + tf * Xh[1:]  # NOTE: Xh is read before assignment in the source (likely X_hat)
     Xh *= P.exp(1j * phs)
     self.X_hat = Xh
Example #16
def calcAUC(data, y0, lag, mgr, asym, time):
    """
    Calculate the area under the curve of the logistic function
    using its integrated formula
    [ A( [A-y0] log[ exp( [4m(l-t)/A]+2 )+1 ]) / 4m ] + At
    """

    # First check that max growth rate is not zero
    # If so, calculate using the data instead of the equation
    if mgr == 0:
        auc = calcAUCData(data, time)
    else:
        timeS = time[0]
        timeE = time[-1]
        t1 = asym - y0
        #try:
        t2_s = py.log(py.exp((4 * mgr * (lag - timeS) / asym) + 2) + 1)
        t2_e = py.log(py.exp((4 * mgr * (lag - timeE) / asym) + 2) + 1)
        #except RuntimeWarning as rw:
            # Exponent is too large, setting to 10^3
        #    newexp = 1000
        #    t2_s = py.log(newexp + 1)
        #    t2_e = py.log(newexp + 1)
        t3 = 4 * mgr
        t4_s = asym * timeS
        t4_e = asym * timeE

        start = (asym * (t1 * t2_s) / t3) + t4_s
        end = (asym * (t1 * t2_e) / t3) + t4_e
        auc = end - start

    if py.absolute(auc) == float('Inf'):
        x = py.diff(time)
        auc = py.sum(x * data[1:])
    return auc
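A minimal usage sketch with made-up logistic parameters (`py` is pylab, as in the source); `data` is only consulted when `mgr` is zero or the closed form overflows to infinity:

import pylab as py

time = py.linspace(0, 24, 49)    # e.g. hours
data = py.zeros(49)              # placeholder growth curve
auc = calcAUC(data, y0=0.05, lag=2.0, mgr=0.3, asym=1.2, time=time)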
Example #17
    def set_pdf(self, x, p, Nrl=1000):
        """Generate the lookup tables. 
    x is the value of the random variate
    pdf is its probability density
    cdf is the cumulative pdf
    inversecdf is the inverse look up table
    
    """

        self.x = x
        self.pdf = p / p.sum()  #normalize it
        self.cdf = self.pdf.cumsum()
        self.inversecdfbins = Nrl
        self.Nrl = Nrl
        y = pylab.arange(Nrl) / float(Nrl)
        delta = 1.0 / Nrl
        self.inversecdf = pylab.zeros(Nrl)
        self.inversecdf[0] = self.x[0]
        cdf_idx = 0
        for n in xrange(1, self.inversecdfbins):
            while self.cdf[cdf_idx] < y[n] and cdf_idx < Nrl:
                cdf_idx += 1
            self.inversecdf[n] = self.x[cdf_idx - 1] + (
                self.x[cdf_idx] - self.x[cdf_idx - 1]) * (y[n] - self.cdf[
                    cdf_idx - 1]) / (self.cdf[cdf_idx] - self.cdf[cdf_idx - 1])
            if cdf_idx >= Nrl:
                break
        self.delta_inversecdf = pylab.concatenate(
            (pylab.diff(self.inversecdf), [0]))
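A minimal usage sketch for this Python 2-era method (note the `xrange`): graft it onto a bare host class and build the tables for a triangular pdf:

import pylab

class _Sampler(object):
    pass
_Sampler.set_pdf = set_pdf       # attach the method shown above to a host class

x = pylab.linspace(-1.0, 1.0, 201)
p = 1.0 - abs(x)                 # triangular density; set_pdf normalizes it
s = _Sampler()
s.set_pdf(x, p, Nrl=1000)        # s.inversecdf now maps uniform draws onto x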
Example #18
def remove_discontinuity(value, xgap=10, ygap=200):
    """
    Remove discontinuity (sudden jump) in a series of values.
    Written by Denis, developed for LLC Fringe Counts data.
    value : list or numpy.array
    xgap  : "width" of index of the list/array to adjust steps
    ygap  : threshold value to detect discontinuity
    """
    difflist = pl.diff(value)
    discont_index = pl.find(abs(difflist) > ygap)

    if len(discont_index) == 0:
        return value
    else:
        discont_index = pl.append(discont_index, len(difflist))

    # find indice at discontinuities
    discont = {"start": [], "end": []}
    qstart = discont_index[0]
    for i in range(len(discont_index) - 1):
        if discont_index[i + 1] - discont_index[i] > xgap:
            qend = discont_index[i]
            discont["start"].append(qstart - xgap)
            discont["end"].append(qend + xgap)
            qstart = discont_index[i + 1]

    # add offsets at discontinuities
    result = pl.array(value)
    for i in range(len(discont["end"])):
        result[0 : discont["start"][i]] += result[discont["end"][i]] - result[discont["start"][i]]

    # remove the median
    result = result - pl.median(result)
    return result
Example #19
 def _phase_map(self):
     self.dphi = (2*P.pi * self.nhop * P.arange(self.nfft/2+1)) / self.nfft
     A = P.diff(P.angle(self.STFT),1) # Complete Phase Map
     U = P.c_[P.angle(self.STFT[:,0]), A - P.atleast_2d(self.dphi).T ]
     U = U - P.np.round(U/(2*P.pi))*2*P.pi
     self.dPhi = U
     return U
Example #20
File: icsd.py  Project: Junji110/iCSD
    def calc_k_matrix(self):
        '''Calculate the K-matrix used by to calculate E-matrices'''
        el_len = self.coord_electrode.size
        # expanding electrode grid
        z_js = pl.zeros(el_len+2)
        z_js[1:-1] = self.coord_electrode
        z_js[-1] = self.coord_electrode[-1] + \
            pl.diff(self.coord_electrode).mean()
        
        c_vec = 1./pl.diff(z_js)
        # Define transformation matrices
        c_jm1 = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        c_j0 = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        c_jall = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        c_mat3 = pl.matrix(pl.zeros((el_len+1, el_len+1)))
        
        for i in xrange(el_len+1):
            for j in xrange(el_len+1):
                if i == j:
                    c_jm1[i+1, j+1] = c_vec[i]
                    c_j0[i, j] = c_jm1[i+1, j+1]
                    c_mat3[i, j] = c_vec[i]
        
        c_jm1[-1, -1] = 0
        
        c_jall = c_j0
        c_jall[0, 0] = 1
        c_jall[-1, -1] = 1
        
        c_j0 = 0
        
        tjp1 = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        tjm1 = pl.matrix(pl.zeros((el_len+2, el_len+2)))
        tj0 = pl.matrix(pl.eye(el_len+2))
        tj0[0, 0] = 0
        tj0[-1, -1] = 0

        for i in xrange(1, el_len+2):
            for j in xrange(el_len+2):
                if i == j-1:
                    tjp1[i, j] = 1
                elif i == j+1:
                    tjm1[i, j] = 1
        
        # Defining K-matrix used to calculate e_mat1-3
        return (c_jm1*tjm1 + 2*c_jm1*tj0 + 2*c_jall + c_j0*tjp1)**-1 * 3 * \
            (c_jm1**2 * tj0 - c_jm1**2 * tjm1 + c_j0**2 * tjp1 - c_j0**2 * tj0)
Example #21
File: utils.py  Project: aflaxman/gbd
 def unimodal_rate(f=rate, age_indices=age_indices, tau=1.e5):
     df = pl.diff(f[age_indices])
     sign_changes = pl.find((df[:-1] > NEARLY_ZERO) & (df[1:] < -NEARLY_ZERO))
     sign = pl.ones(len(age_indices)-2)
     if len(sign_changes) > 0:
          change_age = sign_changes[len(sign_changes)//2]
         sign[change_age:] = -1.
     return -tau*pl.dot(pl.absolute(df[:-1]), (sign * df[:-1] < 0))
Example #22
def testMonotonicIncrease(aRRay):
    if len(gr.find(gr.diff(aRRay)<0))>0:
        print 'The array is NOT monotonic.'
        x=0
    else: 
        print 'The array is non-decreasing.'
        x=1
    return x
Example #23
File: eye.py  Project: jeronimozc/neurapy
def eye_sample_insert_interval(R):
    tt = R.data['Trials']['eyeXData']['Trial Time']
    n_trials = len(tt)
    d_esii = pylab.array([], dtype=float)
    for tr in range(n_trials):
        d_esii = pylab.concatenate((d_esii, pylab.diff(tt[tr])))

    return d_esii
Example #24
    def analyzeSound(self):
        """ Highlight the first N peaks in the frequency diagram.
        """
        # reload the data
        data = self.data
        sample_freq = self.sample_freq
        from scipy.fftpack import fftfreq
        freq_vect = fftfreq(data.size) * sample_freq
        
        # find the local maxima
        y0 = abs(fft(data))
#        y1 = abs(fft(data[:, 1]))
        maxi0 = ((diff(sign(diff(y0))) < 0) & (y0[1:-1] > y0.max()/10.)).nonzero()[0] + 1 # local max
        # maxi1 = ((diff(sign(diff(y1))) < 0) & (y1[1:-1] > y1.max()/10.)).nonzero()[0] + 1 # local max
        
        # frequency plot
#        ax = self.main_figure.figure.add_subplot(212)
#        ax.plot(freq_vect[maxi0], y0[maxi0], "o")
#        # ax.plot(freq_vect[maxi1], y1[maxi1], "o")
        
        # annotate peaks above a cutoff frequency
        fc = 100.
        max_freq = []
        freq_vect_2 = freq_vect[maxi0]
        maxy0 = y0[maxi0]
        maxy0_list = maxy0.tolist()
        while np.size(max_freq) < 12:
            F = np.argmax(maxy0_list)
            maxy0_list[F] = 0
            print(freq_vect_2[F])
            if np.abs(freq_vect_2[F]) > fc:
                max_freq.append(F)
            # suppress neighbouring bins of the same peak (closer than 4 frequency units)
            for i in range(0, 4):
                if (F+2-i) in range(1, len(maxy0_list)) and np.abs(freq_vect_2[F+2-i]-freq_vect_2[F]) < 4:
                    maxy0_list[F+2-i] = 0
                    
        print(freq_vect_2[max_freq])
        for point in max_freq:
            plt.annotate("%.2f" % freq_vect_2[point], (freq_vect_2[point], maxy0[point]))
        ax = self.main_figure.figure.add_subplot(212)
        ax.plot(freq_vect_2[max_freq], maxy0[max_freq], "o")
        
#        for point in maxi1[(freq_vect[maxi0] > fc).nonzero()][:self.ui.spinBox.value()]:
#            plt.annotate("%.2f" % freq_vect[point], (freq_vect[point], y1[point]))
#        
        self.ui.main_figure.canvas.draw()
Example #25
File: eye.py  Project: kghose/neurapy
def eye_sample_insert_interval(R):
  tt = R.data['Trials']['eyeXData']['Trial Time']  
  n_trials = len(tt)
  d_esii = pylab.array([],dtype=float)
  for tr in range(n_trials):
    d_esii = pylab.concatenate((d_esii,pylab.diff(tt[tr])))

  return d_esii
Example #26
 def mu_age_derivative_potential(mu_age=mu_age,
                                 increasing_a0=pl.clip(parameters['increasing']['age_start']-ages[0], 0, len(ages)),
                                 increasing_a1=pl.clip(parameters['increasing']['age_end']-ages[0], 0, len(ages)),
                                 decreasing_a0=pl.clip(parameters['decreasing']['age_start']-ages[0], 0, len(ages)),
                                 decreasing_a1=pl.clip(parameters['decreasing']['age_end']-ages[0], 0, len(ages))):
     mu_prime = pl.diff(mu_age)
     inc_violation = mu_prime[increasing_a0:increasing_a1].clip(-pl.inf, 0.).sum()
     dec_violation = mu_prime[decreasing_a0:decreasing_a1].clip(0., pl.inf).sum()
     return -1.e12 * (inc_violation**2 + dec_violation**2)
Example #27
File: logdata.py  Project: Fab7c4/cletus
	def getTimeHistogramm(self, color, name):
		diffs = plt.diff(self.times)
		## compute standard histogram
		y,x = plt.histogram(diffs, bins=plt.linspace(diffs.min(), diffs.max(), 500))

		## notice that len(x) == len(y)+1
		## We are required to use stepMode=True so that PlotCurveItem will interpret this data correctly.
		curve = pg.PlotCurveItem(x, y, stepMode=True, fillLevel=0, pen=color, brush=color, name=name)
		return curve
Example #28
def smooth(y,smoothBeta=smoothBeta):
    m = len(y)
    
    p = pl.diff(pl.eye(m),3).transpose()
    A = pl.eye(m)+smoothBeta*pl.dot(p.transpose(),p)
    
    smoothY = pl.solve(A, y)
    
    return smoothY
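This is a Whittaker-style penalized least-squares smoother: it solves (I + beta * D3' D3) ys = y, where D3 is the third-order difference operator. A minimal usage sketch, assuming `pl` is pylab and passing the penalty weight explicitly (the default argument expects a module-level `smoothBeta` to exist when the function is defined):

import pylab as pl

y = pl.sin(pl.linspace(0, 2 * pl.pi, 200)) + 0.1 * pl.randn(200)
ys = smooth(y, smoothBeta=100.0)   # larger values give a smoother curve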
Example #29
def spikecv(timestamps,
            start_time=0,
            zero_times=0,
            end_time=None,
            window_len=.1):
    """Given the time stamps compute the coefficient of variation with a jumping window.
  Returns cv and rate as an array.
  Inputs:
    timestamps - the spike timestamps
    start_time - time rel to zero_time we start our windows (needs to be <= 0).
                 If zero, means no pre windows.
                 If None, means prewindows stretch to begining of data
                 The start_time is extended to include an integer number of windows
    zero_times  - reference time. Can be a zx1 array, in which case will give us an array of windows. If scalar
                 will only give one set of windows.
    end_time   - time rel to zero_time we end our windows (needs to >= 0)
                 If zero, means no post-windows
                 If None, means post-windows stretch to end of data
                 The end_time is extended to include an integer number of windows
    window_len - length of window to look at spikes (in same units as time stamps)

  Outputs:
    t  - time of the center of the window
    cv
    rate - in inverse units of timestamp
  """

    window_edges, windows, subwindows = window_spike_train(
        timestamps, start_time, zero_times, end_time, window_len=window_len)
    isi = pylab.diff(timestamps)
    if windows.shape[1]:
        windows[:, -1, 1] -= 1  #we have one less isi sample than timestamps

    t = pylab.zeros(windows.shape[1])
    cv = pylab.zeros(windows.shape[1])
    rate = pylab.zeros(windows.shape[1])

    for n in xrange(windows.shape[1]):
        collected_isi = pylab.array([])
        for m in xrange(windows.shape[0]):
            #CV computation
            collected_isi = pylab.concatenate(
                (collected_isi, isi[windows[m, n, 0]:windows[m, n, 1]]))

        if collected_isi.size > 0:
            mean = collected_isi.mean()
            std = collected_isi.std()
            cv[n] = std / mean
            rate[n] = 1. / mean
        else:
            cv[n] = 0
            rate[n] = 0

        #t[n] = window_len * (n + .5)
    t = (window_edges[1:] + window_edges[:-1]) / 2
    return t, cv, rate
Example #30
def getDataDays(fname):
    f1 = open(fname)
    #Add data to array
    z2=  datetime(2000,1,1)
    z1 = datetime(2016,1,1)

    j = []
    sizeEvent = []
    if 'SATP' in fname:
        for line in f1:
            a =line.split(',')

            if a:
                if contains_digits(a[0]) and len(a)>3 and int(a[3])>0:
                    dat = datetime.strptime(a[0],"%d %m %Y")
                    if  dat< z1 and dat>z2:
                        j.append(dat)
                        sizeEvent.append(int(a[3]))
    elif 'YemenNATSEC' in fname:
        for line in f1:
            a =line.split(',')
            if a:

                if contains_digits(a[0]) and len(a)>3 and int(eval(a[3])/2)>0:# and int(eval(a[2])/2)<35:
                    dat = datetime.strptime(a[0],"%d %m %Y")
                    if  dat< z1 and dat>z2:
                        j.append(dat)
                        sizeEvent.append(int(eval(a[3])/2))

    elif 'PakNATSEC' in fname:
        for line in f1:
            a =line.split(',')
            if a:

                if contains_digits(a[0]) and len(a)>3 and int(eval(a[3])/2)>0 :# and int(eval(a[2])/2)<35: ' and 'South Waziristan' in a[1]
                    dat = datetime.strptime(a[0],"%d %m %Y")
                    if  dat< z1 and dat>z2:
                        j.append(dat)
                        sizeEvent.append(int(eval(a[3])/2))

    ddd = j


    a = [n.total_seconds()/60/60/24 for n in diff(j)]
    daysBetweenKills = np.asarray(a)

    daysBetweenKills[daysBetweenKills==0] += 0.1

    #Separate attacks in the same day
    while(np.any(np.diff(daysBetweenKills)==0)):
        daysBetweenKills[np.concatenate([[False],np.diff(daysBetweenKills)==0])] += 0.1


    daysBetweenAttacks = daysBetweenKills

    return [ddd,np.asarray(sizeEvent),np.asarray(daysBetweenAttacks)]
Example #31
 def unimodal_rate(f=rate, age_indices=age_indices, tau=1.e5):
     df = pl.diff(f[age_indices])
     sign_changes = pl.find((df[:-1] > NEARLY_ZERO)
                            & (df[1:] < -NEARLY_ZERO))
     sign = pl.ones(len(age_indices) - 2)
     if len(sign_changes) > 0:
         change_age = sign_changes[len(sign_changes) // 2]
         sign[change_age:] = -1.
     return -tau * pl.dot(pl.absolute(df[:-1]),
                          (sign * df[:-1] < 0))
Example #32
File: logdata.py  Project: Fab7c4/cletus
	def calcFrequencies(self, times):
		self.times = times
		diffs = plt.diff(times)
		self.total_time = times[-1] - times[0]
		self.mean_period =  plt.mean(diffs) 
		self.max_period = diffs.max() 
		self.min_period = diffs.min()
		self.max_frequency = 1/ self.min_period
		self.min_frequency = 1 / self.max_period
		self.mean_frequency = 1 / self.mean_period
Example #33
File: icsd.py  Project: Junji110/iCSD
    def calc_e_matrices(self):
        '''Calculate the E-matrices used by cubic spline iCSD method'''
        el_len = self.coord_electrode.size
        ## expanding electrode grid
        z_js = pl.zeros(el_len+2)
        z_js[1:-1] = self.coord_electrode
        z_js[-1] = self.coord_electrode[-1] + \
            pl.diff(self.coord_electrode).mean()
        
        ## Define transformation matrices
        c_mat3 = pl.matrix(pl.zeros((el_len+1, el_len+1)))
        
        for i in xrange(el_len+1):
            for j in xrange(el_len+1):
                if i == j:
                    c_mat3[i, j] = 1./pl.diff(z_js)[i]

        # Get K-matrix
        k_matrix = self.calc_k_matrix()
        
        # Define matrixes for C to A transformation:
        # identity matrix except that it cuts off last element:
        tja = pl.matrix(pl.zeros((el_len+1, el_len+2)))
        # converting k_j to k_j+1 and cutting off last element:
        tjp1a = pl.matrix(pl.zeros((el_len+1, el_len+2))) 

        # C to A
        for i in xrange(el_len+1):
            for j in xrange(el_len+2):
                if i == j-1:
                    tjp1a[i, j] = 1
                elif i == j:
                    tja[i, j] = 1
        
        # Define spline coeffiscients
        e_mat0 = tja    
        e_mat1 = tja*k_matrix
        e_mat2 = 3 * c_mat3**2 * (tjp1a-tja) - c_mat3 * \
                (tjp1a + 2 * tja) * k_matrix
        e_mat3 = 2 * c_mat3**3 * (tja-tjp1a) + c_mat3**2 * \
                (tjp1a + tja) * k_matrix
        
        return e_mat0, e_mat1, e_mat2, e_mat3
Example #34
 def getPEA(self, leg):
     t, X, Y = self._loadtrace(pjoin(self.get_tsv_path(), leg + '.tsv'))
     bccXY = self._bcc(X, Y,
                       (img_w / 2 * FLY_LENGTH / self.meanFlyLength_px,
                        img_h / 2 * FLY_LENGTH / self.meanFlyLength_px))
     X, Y = zip(*bccXY)
     swtl = self._getSwingTaggedList(X)
     dswtl = [0] + list(diff(swtl))
     starts = [k - 1 for k in range(len(dswtl)) if dswtl[k] == 1]
     return [self._computeAngle(X[start], Y[start]) for start in starts]
Example #35
 def getAEPx(self, leg):
     t, X, Y = self._loadtrace(pjoin(self.get_tsv_path(), leg + '.tsv'))
     bccXY = self._bcc(X, Y,
                       (img_w / 2 * FLY_LENGTH / self.meanFlyLength_px,
                        img_h / 2 * FLY_LENGTH / self.meanFlyLength_px))
     X, Y = zip(*bccXY)
     swtl = self._getSwingTaggedList(X)
     dswtl = [0] + list(diff(swtl))
     ends = [k for k in range(len(dswtl)) if dswtl[k] == -1]
     return [X[end] for end in ends]
Example #36
File: icsd.py  Project: Junji110/iCSD
 def calc_f_matrix(self):
     '''Calculate the F-matrix for cubic spline iCSD method'''
     el_len = self.coord_electrode.size
     z_js = pl.zeros(el_len+2)
     z_js[1:-1] = self.coord_electrode
     z_js[-1] = z_js[-2] + pl.diff(self.coord_electrode).mean()
     
     # Define integration matrixes
     f_mat0 = pl.matrix(pl.zeros((el_len, el_len+1)))
     f_mat1 = pl.matrix(pl.zeros((el_len, el_len+1)))
     f_mat2 = pl.matrix(pl.zeros((el_len, el_len+1)))
     f_mat3 = pl.matrix(pl.zeros((el_len, el_len+1)))
     
     # Calc. elements
     for j in xrange(el_len):
         for i in xrange(el_len):
             f_mat0[j, i] = si.quad(self.f_mat0, a=z_js[i], b=z_js[i+1], \
                 args=(z_js[j+1]), epsabs=self.tol)[0]
             f_mat1[j, i] = si.quad(self.f_mat1, a=z_js[i], b=z_js[i+1], \
                                args=(z_js[j+1], z_js[i]), \
                                     epsabs=self.tol)[0]
             f_mat2[j, i] = si.quad(self.f_mat2, a=z_js[i], b=z_js[i+1], \
                                args=(z_js[j+1], z_js[i]), \
                                     epsabs=self.tol)[0]
             f_mat3[j, i] = si.quad(self.f_mat3, a=z_js[i], b=z_js[i+1], \
                                args=(z_js[j+1], z_js[i]), \
                                     epsabs=self.tol)[0]
             # image technique if conductivity not constant:
             if self.cond != self.cond_top:    
                 f_mat0[j, i] = f_mat0[j, i] + (self.cond-self.cond_top) / \
                     (self.cond + self.cond_top) * \
                         si.quad(self.f_mat0, a=z_js[i], b=z_js[i+1], \
                             args=(-z_js[j+1]), \
                                 epsabs=self.tol)[0]
                 f_mat1[j, i] = f_mat1[j, i] + (self.cond-self.cond_top) / \
                     (self.cond + self.cond_top) * \
                         si.quad(self.f_mat1, a=z_js[i], b=z_js[i+1], \
                             args=(-z_js[j+1], z_js[i]), epsabs=self.tol)[0]
                 f_mat2[j, i] = f_mat2[j, i] + (self.cond-self.cond_top) / \
                     (self.cond + self.cond_top) * \
                         si.quad(self.f_mat2, a=z_js[i], b=z_js[i+1], \
                             args=(-z_js[j+1], z_js[i]), epsabs=self.tol)[0]
                 f_mat3[j, i] = f_mat3[j, i] + (self.cond-self.cond_top) / \
                     (self.cond + self.cond_top) * \
                         si.quad(self.f_mat3, a=z_js[i], b=z_js[i+1], \
                             args=(-z_js[j+1], z_js[i]), epsabs=self.tol)[0]
     
     e_mat0, e_mat1, e_mat2, e_mat3 = self.calc_e_matrices()
     
     # Calculate the F-matrix
     self.f_matrix = pl.matrix(pl.zeros((el_len+2, el_len+2)))
     self.f_matrix[1:-1, :] = f_mat0*e_mat0 + f_mat1*e_mat1 + \
                             f_mat2*e_mat2 + f_mat3*e_mat3
     self.f_matrix[0, 0] = 1
     self.f_matrix[-1, -1] = 1
Example #37
File: icsd.py  Project: Junji110/iCSD
 def calc_f_inv_matrix(self):
     '''Calculate the inverse F-matrix for the standard CSD method'''
     h_val = abs(pl.diff(self.coord_electrode)[0])
     
      #Inner matrix elements are just the discrete Laplacian coefficients
     self.f_inv_matrix[0, 0] = -1
     for j in xrange(1, self.f_inv_matrix.shape[0]-1):
         self.f_inv_matrix[j, j-1:j+2] = [1., -2., 1.]
     self.f_inv_matrix[-1, -1] = -1
     
     self.f_inv_matrix = self.f_inv_matrix * -self.cond / h_val**2
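The matrix assembled here is a conductivity-scaled discrete Laplacian; an equivalent direct computation for the interior contacts, as a sketch (the source additionally fills the two boundary rows):

import numpy as np

def standard_csd(phi, h_val, cond):
    """Standard CSD estimate: -cond times the second spatial difference."""
    csd = np.zeros_like(phi)
    csd[1:-1] = -cond * (phi[:-2] - 2.0 * phi[1:-1] + phi[2:]) / h_val**2
    return csd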
Example #38
def step(Signal=None, Shift='>'):
    """
		Find the indexes where a dirac, unit step or unit pulse function in the input 
		signal rises or falls.

		This implementation is based on the discrete difference of the input signal.

		Parameters
		__________
		Signal : ndarray
			Input signal data with a dirac, unit step or unit pulse function.
		Shift : str
			Direction of detection, `>` for rise or `<` for fall. 
			Default: `>`

		Returns
		_______
		kwrvals : dict
			A keyworded return values dict is returned with the following keys: 
			Event : ndarray
				The indexes within the input signal `Signal` where the `Shift` was detected.

		See Also
		________
			sync.threshold
			sync.match 
		
		Example
		_______
		x = zeros(6)
		x[2:4] = 1
		plot(x)
		vlines(step(x)['Event'],min(x),max(x),'r','dashed')
		vlines(step(x,'<')['Event'],min(x),max(x),'g','dashed')
		legend(('unit pulse','rise', 'fall'))

		References
		__________
		.. [1] Wikipedia, "Dirac Delta Function". 
		http://en.wikipedia.org/wiki/Dirac_delta_function
		.. [2] Wikipedia, "Heaviside Step Function".
		http://en.wikipedia.org/wiki/Heaviside_step_function
		.. [3] Wikipedia, "Unit Pulse Function".
		http://en.wikipedia.org/wiki/Rectangular_function
	"""
    # Check
    if Signal is None:
        raise TypeError, "An input signal is needed."
    #
    th = (1 if Shift == '>' else -1)
    kwrvals = {}
    kwrvals['Event'] = pl.find(pl.diff(Signal) == th)

    return kwrvals
Example #39
def statistical_distribution(samples, sets):

	X = pylab.standard_normal((samples, sets))
	s = pylab.sum(X * X, axis=0)

	(n, bins, patches) = pylab.hist(s, bins=100)

	x = pylab.linspace(0, max(s), 100)
	y = scipy.stats.chi2.pdf(x,samples) * sets * pylab.diff(bins)[0]

	pylab.plot(x, y, "y", linewidth=5)
	pylab.show()
Example #40
def unwrap(xs, min_value, max_value, in_place=False, jump_fraction=0.5):
    range_ = max_value - min_value
    jump_threshold = range_ * jump_fraction
    diffs = pl.diff(xs)
    octave_diffs = pl.zeros(len(xs) - 1, dtype=pl.int64)
    octave_diffs[diffs > jump_threshold] = -1
    octave_diffs[diffs < -jump_threshold] = 1
    octaves = pl.append(0, pl.cumsum(octave_diffs))
    if in_place:
        xs += octaves * range_
    else:
        return xs + octaves * range_
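A minimal usage sketch: recover a steadily growing phase from values wrapped into [-pi, pi):

import pylab as pl

raw = pl.linspace(0, 6 * pl.pi, 300)              # true, continuous phase
wrapped = pl.mod(raw + pl.pi, 2 * pl.pi) - pl.pi  # what a sensor would report
recovered = unwrap(wrapped, -pl.pi, pl.pi)        # tracks `raw` again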
Example #41
File: spikes.py  Project: kghose/neurapy
def spikecv(timestamps, start_time=0, zero_times=0, end_time=None, window_len=.1):
  """Given the time stamps compute the coefficient of variation with a jumping window.
  Returns cv and rate as an array.
  Inputs:
    timestamps - the spike timestamps
    start_time - time rel to zero_time we start our windows (needs to be <= 0).
                 If zero, means no pre windows.
                 If None, means prewindows stretch to begining of data
                 The start_time is extended to include an integer number of windows
    zero_times  - reference time. Can be a zx1 array, in which case will give us an array of windows. If scalar
                 will only give one set of windows.
    end_time   - time rel to zero_time we end our windows (needs to >= 0)
                 If zero, means no post-windows
                 If None, means post-windows stretch to end of data
                 The end_time is extended to include an integer number of windows
    window_len - length of window to look at spikes (in same units as time stamps)

  Outputs:
    t  - time of the center of the window
    cv
    rate - in inverse units of timestamp
  """

  window_edges, windows, subwindows = window_spike_train(timestamps, start_time, zero_times, end_time, window_len=window_len)
  isi = pylab.diff(timestamps)
  if windows.shape[1]:
    windows[:,-1,1] -= 1 #we have one less isi sample than timestamps

  t = pylab.zeros(windows.shape[1])
  cv = pylab.zeros(windows.shape[1])
  rate = pylab.zeros(windows.shape[1])

  for n in xrange(windows.shape[1]):
    collected_isi = pylab.array([])
    for m in xrange(windows.shape[0]):
      #CV computation
      collected_isi = pylab.concatenate((collected_isi, isi[windows[m,n,0]:windows[m,n,1]]))

    if collected_isi.size > 0:
      mean = collected_isi.mean()
      std = collected_isi.std()
      cv[n] = std/mean
      rate[n] = 1./mean
    else:
      cv[n] = 0
      rate[n] = 0

    #t[n] = window_len * (n + .5)
  t = (window_edges[1:] + window_edges[:-1])/2
  return t, cv, rate
Example #42
File: icsd.py  Project: Junji110/iCSD
 def calc_f_matrix(self):
     '''Calculate the F-matrix'''
     h_val = abs(pl.diff(self.coord_electrode)[0])
     
     for j in xrange(self.coord_electrode.size):
         for i in xrange(self.coord_electrode.size):
             self.f_matrix[j, i] = h_val / (2 * self.cond) * \
                 ((pl.sqrt((self.coord_electrode[j] - \
                 self.coord_electrode[i])**2 + (self.diam / 2)**2) - \
                 abs(self.coord_electrode[j] - self.coord_electrode[i])) +\
                 (self.cond - self.cond_top) / (self.cond + self.cond_top) *\
                 (pl.sqrt((self.coord_electrode[j] + \
                 self.coord_electrode[i])**2 + (self.diam / 2)**2) - \
                 abs(self.coord_electrode[j] + self.coord_electrode[i])))
Example #43
def rejectInterlopers(data):
    ''' Does all of the work to figure out which galaxies don't belong. Makes
    several sorted copies of the dataframe and then applies the fixed gapper
    method.

    '''

    # make some copies so we can sort them around
    sepSorted = data.sort('seperation', ascending=True)
    # How many parts to break into
    parts = len(data) // 15
    splitData = split_list(data, parts)

    # Now we sort the parts by LOSV and find the rejects
    interlopers = []
    for part in splitData:
        # sort by LOSV
        LOSVsorted = part.sort('LOSV', ascending=True)
        rejected = True
        while rejected:
            # Find the difference between all of the neighboring elements
            difference = pyl.diff(LOSVsorted['LOSV'])
            # If diff > 1000 reject
            rejects = abs(difference) > 1000
            # Now remove those items
            indices = pyl.where(rejects == True)
            #print LOSVsorted['LOSV']
            #print difference
            #print indices[0]
            if rejects.any() == True:
                # Always take the more extreme index
                for index, i in enumerate(indices[0]):
                    if (abs(LOSVsorted['LOSV'][LOSVsorted.index[i]]) -
                            abs(LOSVsorted['LOSV'][LOSVsorted.index[i + 1]])
                        ) > 0:
                        pass
                    elif (abs(LOSVsorted['LOSV'][LOSVsorted.index[i]]) -
                          abs(LOSVsorted['LOSV'][LOSVsorted.index[i + 1]])
                          ) < 0:
                        indices[0][index] = i + 1

                #print LOSVsorted.index[list(indices[0])]
                dataframeIndex = list(LOSVsorted.index[list(indices[0])])
                LOSVsorted = LOSVsorted.drop(dataframeIndex)
                interlopers += dataframeIndex
            else:
                rejected = False
    print 'interlopers', interlopers
    return data.drop(interlopers)
Example #44
def spline(name, ages, knots, smoothing, interpolation_method='linear'):
    """ Generate PyMC objects for a spline model of age-specific rate

    Parameters
    ----------
    name : str
    knots : array
    ages : array, points to interpolate to
    smoothing : pymc.Node, smoothness parameter for smoothing spline
    interpolation_method : str, optional, one of 'linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic'

    Results
    -------
    Returns dict of PyMC objects, including 'gamma' (log of rate at
    knots) and 'mu_age' (age-specific rate interpolated at all age
    points)
    """
    assert pl.all(pl.diff(knots) > 0), 'Spline knots must be strictly increasing'

    # TODO: consider changing this prior distribution to be something more familiar in linear space
    gamma = [mc.Normal('gamma_%s_%d'%(name,k), 0., 10.**-2, value=-10.) for k in knots]
    #gamma = [mc.Uniform('gamma_%s_%d'%(name,k), -20., 20., value=-10.) for k in knots]

    # TODO: fix AdaptiveMetropolis so that this is not necessary
    flat_gamma = mc.Lambda('flat_gamma_%s'%name, lambda gamma=gamma: pl.array([x for x in pl.flatten(gamma)]))


    import scipy.interpolate
    @mc.deterministic(name='mu_age_%s'%name)
    def mu_age(gamma=flat_gamma, knots=knots, ages=ages):
        mu = scipy.interpolate.interp1d(knots, pl.exp(gamma), kind=interpolation_method, bounds_error=False, fill_value=0.)
        return mu(ages)

    vars = dict(gamma=gamma, mu_age=mu_age, ages=ages, knots=knots)

    if (smoothing > 0) and (not pl.isinf(smoothing)):
        #print 'adding smoothing of', smoothing
        @mc.potential(name='smooth_mu_%s'%name)
        def smooth_gamma(gamma=flat_gamma, knots=knots, tau=smoothing**-2):
            # the following is to include a "noise floor" so that level value
            # zero prior does not exert undue influence on age pattern
            # smoothing
            # TODO: consider changing this to an offset log normal
            gamma = gamma.clip(pl.log(pl.exp(gamma).mean()/10.), pl.inf)  # only include smoothing on values within 10x of mean

            return mc.normal_like(pl.sqrt(pl.sum(pl.diff(gamma)**2 / pl.diff(knots))), 0, tau)
        vars['smooth_gamma'] = smooth_gamma

    return vars
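A minimal usage sketch, assuming PyMC 2.x is imported as `mc` and pylab as `pl`, matching the source; the knot placement is illustrative:

import pylab as pl

knots = pl.arange(0, 101, 20)
ages = pl.arange(101)
vars = spline('example', ages, knots, smoothing=1.0)
# vars['mu_age'] is a pymc Deterministic: the spline evaluated at every age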
Example #45
def isi_histogram(timestamps,
                  start_time=0,
                  zero_times=0,
                  end_time=None,
                  window_len=1,
                  range=.2,
                  nbins=11):
    """Given the time stamps compute the isi histogram with a jumping window.
  Inputs:
    timestamps - the spike timestamps
    start_time - time rel to zero_time we start our windows (needs to be <= 0).
                 If zero, means no pre windows.
                 If None, means prewindows stretch to begining of data
                 The start_time is extended to include an integer number of windows
    zero_times  - reference time. Can be a zx1 array, in which case will give us an array of windows. If scalar
                 will only give one set of windows.
    end_time   - time rel to zero_time we end our windows (needs to >= 0)
                 If zero, means no post-windows
                 If None, means post-windows stretch to end of data
                 The end_time is extended to include an integer number of windows
    window_len - length of window to look at isi (in same units as time stamps)
    range - maximum isi
    nbins - number of bins in the histogram
  Outputs:
    t - time vector
    be - bin edges
    isihist - the histogram matrix. Normalized for each time slice

  (Can be plotted by doing pylab.pcolor(t, be, isihist.T,vmin=0,vmax=1, cmap=pylab.cm.gray_r))
  """
    window_edges, windows, subwindows = window_spike_train(
        timestamps, start_time, zero_times, end_time, window_len=window_len)
    isi = pylab.diff(timestamps)

    if windows.shape[1]:
        windows[:, -1, 1] -= 1  #we have one less isi sample than timestamps

    bins = pylab.linspace(0, range, nbins + 1)  #We are doing bin edges

    isihist = pylab.zeros((windows.shape[1], nbins))
    for m in xrange(windows.shape[0]):
        for n in xrange(windows.shape[1]):
            hist, be = pylab.histogram(isi[windows[m, n, 0]:windows[m, n, 1]],
                                       bins,
                                       density=True)
            isihist[n, :] += hist

    t = (window_edges[1:] + window_edges[:-1]) / 2
    return t, be, isihist / windows.shape[0]
Example #46
 def getEtalonSpacing(self):
     #this should return the frequency of the Etalon
 
 
     #how to find a stable range!
     bw=self.getBandwidth()
     rdata=self.getcroppedData(self.fdData,max(bw[0],250e9),min(bw[1],2.1e12))  
     
     #need to interpolate data!
     oldfreqs=rdata[:,0]
     intpdata=interp1d(oldfreqs,rdata[:,3],'cubic')
     
     fnew=py.arange(min(oldfreqs),max(oldfreqs),0.1e9)
     absnew=intpdata(fnew)
     #find minima and maxima
     ixmaxima=signal.argrelmax(absnew)[0]
     ixminima=signal.argrelmin(absnew)[0]
     
     fmaxima=py.mean(py.diff(fnew[ixmaxima]))
     fminima=py.mean(py.diff(fnew[ixminima]))
     #calculate etalon frequencies
     df=(fmaxima+fminima)*0.5 #the etalon frequencies
     print(str(df/1e9) + " GHz estimated etalon frequency")
     return df
Example #47
    def _extract_onsets(self):
        """
        ::

           The simplest onset detector in the world: power envelope derivative zero crossings +/-
        """
        fp = self._check_feature_params()
        if not self._have_power:
            return None
        dd = P.diff(P.r_[0, self.POWER])
        self.ONSETS = P.where((dd > 0) & (P.roll(dd, -1) < 0))[0]
        if self.verbosity:
            print("Extracted ONSETS")
        self._have_onsets = True
        return True
Example #48
 def _extract_onsets(self):
     """
     ::
     
        The simplest onset detector in the world: power envelope derivative zero crossings +/-
     """
     fp = self._check_feature_params()
     if not self._have_power:
         return None
     dd = P.diff(P.r_[0,self.POWER])
     self.ONSETS = P.where((dd>0) & (P.roll(dd,-1)<0))[0]
     if self.verbosity:
         print "Extracted ONSETS"
     self._have_onsets = True
     return True
Example #49
def histogram_and_fit(distribution_name,
                      points,
                      bins=10,
                      units="",
                      **fit_kwargs):
    histogram = pylab.hist(points, bins)
    bins = histogram[1]
    bin_step = pylab.median(pylab.diff(bins))
    distribution = _get_distribution(distribution_name)
    fit = distribution.fit(points, **fit_kwargs)
    xs = pylab.linspace(min(bins), max(bins), 1000)
    ys = distribution.pdf(xs, *fit)
    label = _get_label(distribution_name, fit, units)
    pylab.plot(xs, ys * len(points) * bin_step, 'r', label=label)
    pylab.legend()
    return fit
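A hypothetical usage sketch, left commented out because `_get_distribution` and `_get_label` are private helpers of the source module (presumably resolving a scipy.stats distribution by name):

# import pylab
# points = pylab.randn(1000)
# fit = histogram_and_fit('norm', points, bins=30, units='ms')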
Example #50
 def mu_age_derivative_potential(
     mu_age=mu_age,
     increasing_a0=pl.clip(parameters['increasing']['age_start'] - ages[0],
                           0, len(ages)),
     increasing_a1=pl.clip(parameters['increasing']['age_end'] - ages[0], 0,
                           len(ages)),
     decreasing_a0=pl.clip(parameters['decreasing']['age_start'] - ages[0],
                           0, len(ages)),
     decreasing_a1=pl.clip(parameters['decreasing']['age_end'] - ages[0], 0,
                           len(ages))):
     mu_prime = pl.diff(mu_age)
     inc_violation = mu_prime[increasing_a0:increasing_a1].clip(
         -pl.inf, 0.).sum()
     dec_violation = mu_prime[decreasing_a0:decreasing_a1].clip(
         0., pl.inf).sum()
     return -1.e12 * (inc_violation**2 + dec_violation**2)
Example #51
    def find_ne_max(self):
        if not hasattr(self, "ne_max"):
            self.ne_max = []
        zeros_gd = p.where(self.gd_m <= 0.0)[0]
#       print(max(p.diff(self.gd_m)))
        if len(zeros_gd) != 0:
            print("pequeno")  # "small" (Portuguese)
            self.ne_max.append(rf.freq2den(1e9 * self.X[zeros_gd[0]]))
            self.gd_m[zeros_gd[0]:] = float("NaN")
        else:
            dif_gd = p.where(p.diff(self.gd_m) > 3.5)[0]
            if len(dif_gd) != 0:
                print("grande")  # "large" (Portuguese)
                self.ne_max.append(rf.freq2den(1e9 * self.X[dif_gd[-1]]))
                self.gd_m[dif_gd[0]:] = float("NaN")
            else:
                self.ne_max.append(float("NaN"))
Example #52
def spline(name, ages, knots, smoothing, interpolation_method='linear'):
    """ Generate PyMC objects for a piecewise constant Gaussian process (PCGP) model

    Parameters
    ----------
    name : str
    knots : array, locations of the discontinuities in the piecewise constant function
    ages : array, points to interpolate to
    smoothing : pymc.Node, smoothness parameter for smoothing spline
    interpolation_method : str, optional, one of 'linear', 'nearest', 'zero', 'slinear', 'quadratic, 'cubic'

    Results
    -------
    Returns dict of PyMC objects, including 'gamma' and 'mu_age'
    the observed stochastic likelihood and data predicted stochastic
    """
    assert pl.all(pl.diff(knots) > 0), 'Spline knots must be strictly increasing'
    
    gamma = [mc.Normal('gamma_%s_%d'%(name,k), 0., 10.**-2, value=-10.) for k in knots]
    #gamma = [mc.Uniform('gamma_%s_%d'%(name,k), -20., 20., value=-10.) for k in knots]

    # TODO: fix AdaptiveMetropolis so that this is not necessary
    flat_gamma = mc.Lambda('flat_gamma_%s'%name, lambda gamma=gamma: pl.array([x for x in pl.flatten(gamma)]))


    import scipy.interpolate
    @mc.deterministic(name='mu_age_%s'%name)
    def mu_age(gamma=flat_gamma, knots=knots, ages=ages):
        mu = scipy.interpolate.interp1d(knots, pl.exp(gamma), kind=interpolation_method, bounds_error=False, fill_value=0.)
        return mu(ages)

    vars = dict(gamma=gamma, mu_age=mu_age, ages=ages, knots=knots)

    if (smoothing > 0) and (not pl.isinf(smoothing)):
        print 'adding smoothing of', smoothing
        @mc.potential(name='smooth_mu_%s'%name)
        def smooth_gamma(gamma=flat_gamma, knots=knots, tau=smoothing**-2):
            # the following is to include a "noise floor" so that level value
            # zero prior does not exert undue influence on age pattern
            # smoothing
            gamma = gamma.clip(pl.log(pl.exp(gamma).mean()/10.), pl.inf)  # only include smoothing on values within 10x of mean

            return mc.normal_like(pl.sqrt(pl.sum(pl.diff(gamma)**2 / pl.diff(knots))), 0, tau)
        vars['smooth_gamma'] = smooth_gamma

    return vars
Example #53
    def process_ts(self, ts):
        ''' The meat of the class -- convert an input time series into beds '''

        # Define a function to stop time points from going off the end of the array
        tlim = lambda t: pl.minimum(self.npts, t)  # Short for "time limit"

        # Housekeeping
        hsp = self.hspars  # Shorten since used a lot
        beds = sc.objdict()  # To make in one step: make(keys=self.reskeys, vals=pl.zeros(self.npts))
        for reskey in self.reskeys:
            beds[reskey] = pl.zeros(self.npts)

        # If cumulative, take the difference to get the change at each timepoint
        if self.datatype == 'cumulative':
            ts = pl.diff(ts)

        # Actually process the time series -- where all the logic is, loop over each time point and update beds required
        for t, val in enumerate(ts):

            # Precompute results
            sympt = val * hsp.symptomatic  # Find how many symptomatic people there are
            hosp = sympt * hsp.hospitalized  # How many require hospitalization
            icu = sympt * hsp.icu  # How many will require ICU beds
            mild = hosp - icu  # Non-ICU patients are mild
            tstart_aac = t + hsp.delay  # When adult acute beds start being used
            tstop_aac = tstart_aac + hsp.mild_dur  # When adult acute beds are no longer needed
            icu_in_aac = round(
                hsp.severe_dur *
                hsp.aac_frac)  # Days an ICU patient spends in AAC
            icu_in_icu = hsp.severe_dur - icu_in_aac  # ...and in ICU
            tstop_pre_icu = tstart_aac + icu_in_aac  # When they move from AAC to ICU
            tstop_icu = tstop_pre_icu + icu_in_icu  # When they leave ICU

            # Compute actual results
            beds.aac[tlim(tstart_aac):tlim(tstop_aac)] += mild      # Add mild patients to AAC
            beds.aac[tlim(tstart_aac):tlim(tstop_pre_icu)] += icu   # Add pre-ICU ICU patients
            beds.icu[tlim(tstop_pre_icu):tlim(tstop_icu)] += icu    # Add ICU patients

        beds.total = beds.aac + beds.icu  # Compute total results

        return beds
Example #54
def get_undef_blade():
    blade = {}
    blade["tower"] = py.array(
        [[0.0, 4.15 / 2, 4.15 / 2, -4.15 / 2, -4.15 / 2, 0.0],
         [0.0, 0.0, 115.63, 115.63, 0.0, 0.0]])
    blade["shaft"] = py.array(
        [[
            blade["tower"][0, 1],
            blade["tower"][0, 1] - 7.1 * py.cos(5 * py.pi / 180)
        ],
         [
             blade["tower"][1, 2] + 2.75,
             blade["tower"][1, 2] + 2.75 + abs(7.1) * py.sin(5 * py.pi / 180)
         ]])
    shaft_tan = py.diff(blade["shaft"])
    shaft_tan = shaft_tan[0] + 1j * shaft_tan[1]
    shaft_tan /= abs(shaft_tan)
    shaft_normal = shaft_tan * 1j

    blade["hub_fun"] = lambda r: (blade["shaft"][0, -1] +
                                  1j * blade["shaft"][1, -1] + r * shaft_normal)

    blade["hub"] = py.array(
        [[py.real(blade["hub_fun"](0)),
          py.real(blade["hub_fun"](2.8))],
         [py.imag(blade["hub_fun"](0)),
          py.imag(blade["hub_fun"](2.8))]])
    cone = -2.5 * py.pi / 180  # Cone angle
    blade_normal = (py.cos(cone) + 1j * py.sin(cone)) * shaft_normal
    blade["blade_fun"] = lambda r, R, defl: (
        blade["hub"][0, -1] + 1j * blade["hub"][1, -1]
        + r * blade_normal
        + r / R * 2.332 * blade_normal / 1j
        + defl * blade_normal / 1j)
    R = 86.366
    blade["blade"] = py.array([[
        py.real(blade["blade_fun"](0, R, 0)),
        py.real(blade["blade_fun"](R, R, 0))
    ],
                               [
                                   py.imag(blade["blade_fun"](0, R, 0)),
                                   py.imag(blade["blade_fun"](R, R, 0))
                               ]])
    #print(py.angle(blade_normal)*180/py.pi,py.angle(shaft_normal)*180/py.pi)
    return (blade)
Example #55
def getDataIndividual(fname):
    f1 = open(fname)
    #Add data to array
    z2=  datetime(1932,1,1)
    z1 = datetime(2015,1,1)

    j = []

    for line in f1:
        a =line.split(',')
        if a:

            if contains_digits(a[0]) and len(a)>3:

                dat = datetime.strptime(a[0],"%d %m %Y")


                if  dat< z1 and dat>z2:
                    j.append(dat)

    sizeEvent = []
    d = 1
    ddd =[]
    count = 0


    for a in range(1,len(j)):
        if j[a] == j[a-1]:
            d +=1
        else:
            count +=1
            sizeEvent.append(d)
            ddd.append(j[a])
            d = 1
    if d>1:
        sizeEvent.append(d)
        ddd.append(j[a])


    a = [n.total_seconds()/60/60/24 for n in diff(j)]
    daysBetweenKills = np.asarray(a,dtype='int')
    daysBetweenAttacks = daysBetweenKills[daysBetweenKills>0]

    return [ddd,np.asarray(sizeEvent),daysBetweenAttacks]
Example #56
    def getSwingAmplitude(self, leg):
        t, X, Y = self._loadtrace(pjoin(self.get_tsv_path(), leg + '.tsv'))
        swtl = self._getSwingTaggedList(X, True)
        dswtl = [0] + list(diff(swtl))
        # #print dswtl
        starts = [k - 1 for k in range(len(dswtl)) if dswtl[k] == 1]
        ends = [k - 1 for k in range(len(dswtl)) if dswtl[k] == -1]

        # if len(ends) < len(starts):
        # 	ends.append(len(dswtl) - 1)

        # ##
        # #A.
        # DECIMATE starting and ending truncated swings
        if starts and ends:
            if starts[0] > ends[0]:
                del ends[0]
            if starts[-1] > ends[-1]:
                del starts[-1]
            # #
            # ##

            # #print 'nSTARTS, nENDS', map(len, [starts, ends])
            assert (len(starts) == len(ends))

        pairs = zip(starts, ends)
        # #print pairs

        # ##
        # #B.
        # #DECIMATING "bad termini" to remove variability artifact introduced by in-swing track chopping at the termini
        # Potentially "bad termini" are arbitrarily defined as those that are less than 5 frames away (25ms) from the start and end of the video sequence
        # #This should fix the problematic artefacts that escaped A. decimation
        # ##

        pairs = filter(lambda ind: ind[0] >= 3 and ind[1] <= len(X) - 4, pairs)
        # #
        # ##

        # print ">>", pairs
        swamp = [X[end] - X[start] for start, end in pairs]
        # swamp = filter(lambda val: val != 0, swamp)
        return swamp
Example #57
 def test_diffusion_order():
     """
 Test the convergence order of the continuous integrator using
 the "circle" system
 """
     print "test_diffusion_order"
     sde = SDE(lambda x: dot([[0, -1], [1, 0]], x),
               lambda x, dw: zeros_like(x), 2)
     ee = []
     for dt in 10**-linspace(0.5, 4.5, 11):
         print "dt=", dt
         t, y, w = sde([0, 1], 0, 0.2 * pi, dt)
         err = sqrt((y[-1, 0] + sin(t[-1]))**2 + (y[-1, 1] - cos(t[-1]))**2)
         ee.append([dt, err])
     ee = asarray(ee)
     loglog(ee[:, 0], ee[:, 1], 'o-')
     lee = log(ee)
     lee = lee - mean(lee, axis=0)[newaxis, :]
     lee = diff(lee, axis=0)
     title("Order %.2f" % mean(lee[:, 1] / lee[:, 0]))
Example #58
def norm_hist_bins(y, bins=10, normed='height'):
    """Just like the matplotlib mlab.hist, but can normalize by height.

    normed can be 'area' (produces matplotlib behavior, area is 1), 
    any False value (no normalization), or any True value (normalization).

    Original docs from matplotlib:

    Return the histogram of y with bins equally sized bins.  If bins
    is an array, use the bins.  Return value is
    (n,x) where n is the count for each bin in x

    If normed is False, return the counts in the first element of the
    return tuple.  If normed is True, return the probability density
    n/(len(y)*dbin)
    
    If y has rank>1, it will be raveled
    Credits: the Numeric 22 documentation
    """
    y = asarray(y)
    if len(y.shape)>1: y = ravel(y)

    if not iterable(bins):
        ymin, ymax = min(y), max(y)
        if ymin==ymax:
            ymin -= 0.5
            ymax += 0.5

        if bins==1: bins=ymax
        dy = (ymax-ymin)/bins
        bins = ymin + dy*arange(bins)
    n = searchsorted(sort(y), bins)
    n = diff(concatenate([n, [len(y)]]))
    if normed:
        if normed == 'area':
            db = bins[1]-bins[0]
        else:
            db = 1.0
        return 1/(len(y)*db)*n, bins
    else:
        return n, bins
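A minimal usage sketch; the function relies on pylab's star imports (asarray, ravel, iterable, searchsorted, sort, diff, concatenate, arange):

from pylab import *

y = randn(1000)
n, bins = norm_hist_bins(y, bins=20, normed='height')  # counts divided by len(y)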