Code Example #1
File: __init__.py Project: proggy/cofunc
 def diff(self, n=1):
     """Calculate the n-th order differential of the function."""
     # 2012-06-27 - 2012-07-11
     x, y = self.diff(n=n-1).xy() if n > 1 else self.xy()
     x = self._filter_double(x)
     return type(self)(x=.5*(x[1:]+x[:-1]),
                       y=scipy.diff(y)/scipy.diff(x))
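
Note: `scipy.diff` in these examples was simply a re-export of `numpy.diff`; the alias has since been deprecated and removed from SciPy, so `numpy.diff` is the drop-in replacement. A minimal standalone sketch of the midpoint finite-difference idea used above (the function name is illustrative, not part of cofunc):

import numpy as np

def midpoint_derivative(x, y):
    # dy/dx approximated on the midpoints between samples
    x, y = np.asarray(x, float), np.asarray(y, float)
    return 0.5 * (x[1:] + x[:-1]), np.diff(y) / np.diff(x)

x = np.linspace(0.0, np.pi, 101)
xm, d = midpoint_derivative(x, np.sin(x))
assert np.allclose(d, np.cos(xm), atol=1e-3)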
Code Example #2
File: ponzi4.py Project: Twizanex/bellman
def gradient2(f):
	dM = g.grid_M[1] - g.grid_M[0]
	dD = g.grid_D[1] - g.grid_D[0]	
	g1 = scipy.diff(f, 1, 0) / dM
	g2 = scipy.diff(f, 1, 1) / dD
	g3 = addNanRow(g1)
	g4 = addNanCol(g2)
	return [g3, g4]
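
Because diff shortens each axis by one, the snippet above pads the gradients back to the grid shape with the project's addNanRow/addNanCol helpers (not shown). For comparison, numpy.gradient keeps the input shape by using central differences; a quick sketch with toy data (illustrative only):

import numpy as np

f = np.arange(12.0).reshape(3, 4)    # toy function on a 3x4 grid
dM, dD = 0.5, 0.25                   # grid spacings along each axis

g1 = np.diff(f, 1, 0) / dM           # shape (2, 4): one row short
g2 = np.diff(f, 1, 1) / dD           # shape (3, 3): one column short
gM, gD = np.gradient(f, dM, dD)      # both retain shape (3, 4)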
Code Example #3
def entropy2(values):
    """Calculate the entropy of vector values.
    
    values will be flattened to a 1d ndarray."""
    
    values = sp.asarray(values).flatten()
    p = sp.diff(sp.c_[0,sp.diff(sp.sort(values)).nonzero(), values.size])/float(values.size)
    H = (p*sp.log2(p)).sum()
    return -H
Code Example #4
def entropy2(values):
    """Calculate the entropy of vector values.
    
    values will be flattened to a 1d ndarray."""
    
    values = values.flatten()
    M = len(sp.unique(values))
    p = sp.diff(sp.c_[sp.diff(sp.sort(values)).nonzero(), len(values)])/float(len(values))
    H = -((p*sp.log2(p)).sum())
    return H
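
Both entropy2 variants use the same idiom: sort the flattened sample, diff it, and call nonzero() to find the boundaries between runs of equal values; diff-ing those boundary positions (plus the endpoints) yields the run lengths, i.e. the counts behind the probabilities p. Note the first variant prepends the left endpoint (the 0 inside sp.c_) that the second omits. A plainer numpy-only sketch of the same empirical entropy:

import numpy as np

def empirical_entropy(values):
    # Shannon entropy (in bits) of the empirical distribution
    values = np.asarray(values).flatten()
    _, counts = np.unique(values, return_counts=True)
    p = counts / float(values.size)
    return -(p * np.log2(p)).sum()

assert np.isclose(empirical_entropy([0, 0, 1, 1]), 1.0)   # fair coin: 1 bit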
Code Example #5
 def test_respects_refractory_period(self):
     refractory = 100 * pq.ms
     st = self.invoke_gen_func(
         self.highRate, max_spikes=1000, refractory=refractory)
     self.assertGreater(
         sp.amax(sp.absolute(sp.diff(st.rescale(pq.s).magnitude))),
         refractory.rescale(pq.s).magnitude)
     st = self.invoke_gen_func(
         self.highRate, t_stop=10 * pq.s, refractory=refractory)
     self.assertGreater(
         sp.amax(sp.absolute(sp.diff(st.rescale(pq.s).magnitude))),
         refractory.rescale(pq.s).magnitude)
Code Example #6
File: PDE.py Project: atkm/reed-modeling
def unique(a):
    order = sp.lexsort(a.T)
    a = a[order]
    diff = sp.diff(a, axis=0)
    ui = sp.ones(len(a), 'bool')
    ui[1:] = (diff != 0).any(axis=1) 
    return a[ui]
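
The lexsort/diff idiom marks a row as new whenever it differs from its predecessor in sorted order. On NumPy >= 1.13 the same result is available as np.unique(a, axis=0), which makes a handy cross-check (a sketch, not from the original project):

import numpy as np

a = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [5, 6]])

order = np.lexsort(a.T)
s = a[order]
keep = np.ones(len(s), dtype=bool)
keep[1:] = (np.diff(s, axis=0) != 0).any(axis=1)

assert np.array_equal(s[keep], np.unique(a, axis=0))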
Code Example #7
    def return_results(self, pores=None, throats=None, **kwargs):
        r"""
        Send results of the simulation out to the appropriate locations.

        This is a basic version of the update that simply sends out the main
        result (quantity). More elaborate updates should be subclassed.
        """
        if pores is None:
            pores = self.Ps
        if throats is None:
            throats = self.Ts

        phase_quantity = self._quantity.replace(self._phase.name + '_', '')
        if phase_quantity not in self._phase.props():
            self._phase[phase_quantity] = sp.nan
        self._phase[phase_quantity][pores] = self[self._quantity][pores]
        conn_arr = self._net.find_connected_pores(self.Ts)
        dx = sp.squeeze(sp.diff(self[self._quantity][conn_arr], n=1, axis=1))
        g = self['throat.conductance']
        rate = sp.absolute(g * dx)
        if 'throat.rate' not in self._phase.props():
            self._phase['throat.rate'] = sp.nan
        self._phase['throat.rate'][throats] = rate[throats]
        logger.debug('Results of ' + self.name +
                     ' algorithm have been added to ' + self._phase.name)
Code Example #8
File: picture.py Project: aroundnothing/optar
def decode(file_name):
    border.rotate(file_name)
    image = Image.open("temp.png")
    q = border.find("temp.png")
    ind = sp.argmin(sp.sum(q, 1), 0)
    up_left = q[ind, 0] + 2
    up_top = q[ind, 1] + 2
    d_right = q[ind+1, 0] - 3
    d_bottom = q[ind-1, 1] - 3

    box = (up_left, up_top, d_right, d_bottom)
    region = image.crop(box)
    h_sum = sp.sum(region, 0)
    m = argrelmax(sp.correlate(h_sum, h_sum, 'same'))
    s = sp.average(sp.diff(m))
    m = int(round(d_right - up_left)/s)
    if m % 3 != 0:
        m += 3 - m % 3
    n = int(round(d_bottom - up_top)/s)
    if n % 4 != 0:
        n += 4 - n % 4
    s = int(round(s))+1

    region = region.resize((s*m, s*n), PIL.Image.ANTIALIAS)
    region.save("0.png")
    pix = region.load()
    matrix = mix.off(rec.matrix(pix, s, m, n))
    str2 = hamming.decode(array_to_str(matrix))

    return hamming.bin_to_str(str2)
Code Example #9
File: qrsdetect.py Project: likeMyCode/ECGDiagnose
    def qrsDetect(self, qrslead=0):
         """Detect QRS onsets using modified PT algorithm
         """
         # If ecg is a vector, it will be used for qrs detection.
         # If it is a matrix, use qrslead (default 0)
         if len(self.data.shape) == 1:
             self.raw_ecg = self.data
         else:
             self.raw_ecg = self.data[:,qrslead]
         
         self.filtered_ecg = self.bpfilter(self.raw_ecg)
         self.diff_ecg  = scipy.diff(self.filtered_ecg)
         self.sq_ecg = abs(self.diff_ecg)
         self.int_ecg = self.mw_integrate(self.sq_ecg)
         
         # Construct buffers with last 8 values 
         self._initializeBuffers(self.int_ecg)
         
         peaks = self.peakDetect(self.int_ecg)
         self.checkPeaks(peaks, self.int_ecg)

         # compensate for delay during integration
         self.QRSpeaks = self.QRSpeaks  - 40 * (self.samplingrate / 1000)
         
         #print ("length of qrs peaks and ecg", len(self.QRSpeaks), len(self.raw_ecg))
         #print(self.QRSpeaks)
         return self.QRSpeaks
Code Example #10
File: util.py Project: chandranorth/usadelRicatti
def continuous_phase(phase, axis=0, center=False):
    """Add and subtract 2 pi such that the phase in the array is
       as continuous as possible, along first or given axis. Optionally,
       it also centers the phase data so that the average is smallest."""

    phase = _n.array(phase, copy=0)

    rowshape = list(phase.shape)
    
    if len(rowshape) > 0:
        rowshape[axis] = 1

        slip = _n.concatenate([ _n.zeros(rowshape),
                                scipy.diff(phase, axis=axis) ],
                              axis=axis)
        slip = _n.around(slip/(2*_n.pi))
        cumslip = scipy.cumsum(slip, axis=axis)

        phase = phase - 2*_n.pi*cumslip
    else:
        pass

    if center:
        offset = _n.around(scipy.average(phase, axis=axis)/(2*_n.pi))
        offset = _n.reshape(offset, rowshape)
        offset = _n.repeat(offset, cumslip.shape[axis], axis=axis)
        phase = phase - 2*_n.pi*offset
    
    return phase
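
Rounding diff(phase)/(2 pi) and subtracting the cumulative slip count is essentially what numpy.unwrap does. A quick 1-D check of the equivalence (assuming the jumps really are wraps rather than dynamics):

import numpy as np

t = np.linspace(0.0, 6 * np.pi, 200)
wrapped = np.angle(np.exp(1j * t))                 # folded into (-pi, pi]

slip = np.concatenate([[0.0], np.diff(wrapped)])
slip = np.around(slip / (2 * np.pi))
restored = wrapped - 2 * np.pi * np.cumsum(slip)

assert np.allclose(restored, np.unwrap(wrapped))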
Code Example #11
def fitLength(t, v, startInd, startModel, vErr=0):
  
  expInd = max(enumerate(diff(v)), key=lambda x:x[1])[0] + 1
  startInd = max(expInd, startInd)
  
  # only fit the exponential part of the traces
  t, v = t[startInd:], v[startInd:]
  
  # start with initial guess and fit parameters of model
  startParams = [vErr] + [p for pair in startModel for p in pair]
  
  try:
    params, pCov = optimize.curve_fit(expSumParams, t, v, p0=startParams,
                                      maxfev=500)
  except RuntimeError as err:
    if 'Number of calls to function has reached maxfev' in err.message:
      print(err.message)
      return [], float('Inf'), float('inf')
    else:
      raise
    
  fitModel = [(tau, dV) for tau, dV in zip(params[1::2], params[2::2])]
  vErr = params[0]
  fitV = expSum(t, fitModel, vErr=vErr)
  vResid = sqrt(sum((vn - fitVn)**2 for vn, fitVn in zip(v, fitV)) / len(fitV))
  return fitModel, vErr, vResid
Code Example #12
def getStepWindow(t, v):
  # return time and voltage vectors during the stimulus period only
  
  # find the point of maximum voltage, and cut off everything afterwards
  maxInd, maxV = max(enumerate(v), key=lambda x: x[1])
  minInd, minV = min(enumerate(v), key=lambda x: x[1])
  if maxV - v[0] > v[0] - minV:
    # this is a positive step
    t = t[:maxInd]
    v = scipy.array(v[:maxInd])
  else:
    # this is a negative step, flip it for now
    t = t[:minInd]
    v = v[0] - scipy.array(v[:minInd])
  
  # re-center time to start at the point of maximum voltage change
  diffV = diff(v)
  dVInd, maxDV = max(enumerate(diffV), key=lambda x: x[1])
  dVInd -= 1
  while diffV[dVInd] > 0:
    dVInd -= 1
  dVInd += 1
  
  t -= t[dVInd]
  v -= v[dVInd]
  
  return t, v, dVInd
Code Example #13
    def setUp(self) :
        # Read in just to figure out the band structure.
        this_test_file = 'testdata/testfile_guppi_rotated.fits'
        Reader = fitsGBT.Reader(this_test_file, feedback=0)
        Blocks = Reader.read((0,),())
        bands = ()
        for Data in Blocks:
            n_chan = Data.dims[3]
            Data.calc_freq()
            freq = Data.freq
            delta = abs(sp.mean(sp.diff(freq)))
            centre = freq[n_chan//2]
            band = int(centre/1e6)
            bands += (band,)
            map = sp.zeros((n_chan, 15, 11))
            map = algebra.make_vect(map, axis_names=('freq', 'ra', 'dec'))
            map.set_axis_info('freq', centre, -delta)
            map.set_axis_info('ra', 218, -0.2)
            map.set_axis_info('dec', 2, 0.2)
            algebra.save('./testout_clean_map_I_' + str(band) + '.npy', map)

        self.params = {'sm_input_root' : 'testdata/',
                       'sm_file_middles' : ("testfile",),
                       'sm_input_end' : "_guppi_rotated.fits",
                       'sm_output_root' : "./testout_",
                       'sm_output_end' : "_sub.fits",
                       'sm_solve_for_gain' : True,
                       'sm_gain_output_end' : 'gain.pickle',
                       'sm_map_input_root' : './testout_',
                       'sm_map_type' : 'clean_map_',
                       'sm_map_polarizations' : ('I',),
                       'sm_map_bands' : bands
                       }
Code Example #14
def whittaker(inY,inL=15,inD=2):
    
    """
    cette fonction permet de lisser le signal d'entrée en utilisant le filtre de Whittaker.
    ref: Eilers, P.H.C. (2003) "A perfect smoother", Analytical Chemistry, 75, 3631 – 3636.
    
    Entrée:
    inY: le signal à lisser
    
    inL: correspond au parmètre de lissage. Plus il est grand plus le lissage est élevé. par défaut à 15
    comme dans l'article : 
    Geng, L.; Ma, M.; Wang, X.; Yu, W.; Jia, S.; Wang, H.	Comparison of Eight Techniques 
    for Reconstructing Multi-Satellite Sensor Time-Series NDVI Data Sets in the Heihe River Basin, China. 
    Remote Sens. 2014, 6, 2024-2049.
    
    inD: ordre des differences de pénalités
    
    """
    
    m=sp.size(inY)
    E=sp.eye(m)
    D=sp.diff(E,inD)
    Z=E+ (inL*sp.dot(D,sp.transpose(D)))
    ws=sp.linalg.solve(Z,inY)
    
    return ws
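
In matrix form the smoother solves (I + lambda * D'D) z = y, where D is the inD-th order difference matrix obtained here by differencing the identity. A usage sketch in plain numpy (hedged: a dense identity is fine for short signals; for long ones a sparse D is advisable):

import numpy as np

def whittaker_np(y, lam=15.0, d=2):
    m = y.size
    E = np.eye(m)
    D = np.diff(E, d, axis=0)          # (m-d, m) difference matrix
    return np.linalg.solve(E + lam * D.T.dot(D), y)

rng = np.random.default_rng(0)
y = np.sin(np.linspace(0, 2 * np.pi, 100)) + 0.1 * rng.standard_normal(100)
ys = whittaker_np(y)                   # smoothed signal, same length as y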
Code Example #15
File: __Cubic__.py Project: Maggie1988/OpenPNM
    def add_boundaries(self):
        r'''
        This method uses ``clone_pores`` to clone the surface pores (labeled
        'left','right', etc), then shifts them to the periphery of the domain,
        and gives them the label 'right_face', 'left_face', etc.
        '''
        x,y,z = self['pore.coords'].T

        Lc = sp.amax(sp.diff(x)) #this currently works but is very fragile

        offset = {}
        offset['front'] = offset['left'] = offset['bottom'] = [0,0,0]
        offset['back']  = [x.max()+Lc/2,0,0]
        offset['right'] = [0,y.max()+Lc/2,0]
        offset['top']   = [0,0,z.max()+Lc/2]

        scale = {}
        scale['front']  = scale['back']  = [0,1,1]
        scale['left']   = scale['right'] = [1,0,1]
        scale['bottom'] = scale['top']   = [1,1,0]

        for label in ['front','back','left','right','bottom','top']:
            ps = self.pores(label)
            self.clone_pores(pores=ps,apply_label=[label+'_boundary','boundary'])
            #Translate cloned pores
            ind = self.pores(label+'_boundary')
            coords = self['pore.coords'][ind]
            coords = coords*scale[label] + offset[label]
            self['pore.coords'][ind] = coords
Code Example #16
File: OLS.py Project: strategist922/qikify
    def dw(self):
        """Calculates the Durbin-Waston statistic
        """
        de = diff(self.e,1)
        dw = dot(de,de) / dot(self.e,self.e)

        return dw
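
The statistic is DW = sum((e[t] - e[t-1])**2) / sum(e[t]**2); it lies in [0, 4], with values near 2 indicating no first-order autocorrelation in the residuals e. A self-contained check on white noise (illustrative; the class above keeps its residuals in self.e):

import numpy as np

def durbin_watson(e):
    de = np.diff(e)
    return de.dot(de) / e.dot(e)

rng = np.random.default_rng(1)
e = rng.standard_normal(10000)
assert abs(durbin_watson(e) - 2.0) < 0.1    # white noise gives DW near 2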
Code Example #17
def plot_disc_policy():
    #First compute policy function...==========================================
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)
    
    m = 20
    v = 200
    f = discretelognorm(w,m,v)
    
    VEprime = sp.zeros((N,1))
    VUprime    = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    psiprime = sp.ones((N,1))
    gamma = 0.1
    beta = 0.9
    
    m = 15
    tol = 10**-9
    delta = 1+tol
    it = 0
    while (delta >= tol):
        it += 1
        
        psi = psiprime.copy()
        arg1 = sp.repeat(sp.transpose(VEprime),N,0)
        arg2 = sp.repeat(EVUprime,N,1)
        arg = sp.array([arg2,arg1])
        psiprime = sp.argmax(arg,axis = 0) 
        
        for j in sp.arange(0,m):
            VE = VEprime.copy()
            VU = VUprime.copy()
            EVU = EVUprime.copy()
            VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
            arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
            arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
            arg = arg1+arg2
            VUprime = alpha_util_grid + beta*arg
            EVUprime = sp.dot(VUprime,f)  
    
        
    
        delta = sp.linalg.norm(psiprime -psi) 

    wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
    wr = w[wr_ind]
    print w[250],wr[250]
        
    #Then plot=================================================================
    plt.plot(w,psiprime[250,:]) 
    plt.ylim([-.5,1.5])      
    plt.xlabel(r'$w\prime$')
    plt.yticks([0,1])
    plt.savefig('disc_policy.pdf')
Code Example #18
File: ecgtk.py Project: Basildcruz/ecgtk
    def qrs_detect(self, qrslead=0):
         """Detect QRS onsets using modified PT algorithm
         """
         # If ecg is a vector, it will be used for qrs detection.
         # If it is a matrix, use qrslead (default 0)
         if len(self.data.shape) == 1:
             self.raw_ecg = self.data
         else:
             self.raw_ecg = self.data[:,qrslead]

         # butterworth bandpass filter 5 - 15 Hz
         self.filtered_ecg = self._bpfilter(self.raw_ecg)
         # differentiate
         self.diff_ecg  = scipy.diff(self.filtered_ecg)
         # take absolute value (was square in original PT implementation)
         self.abs_ecg = abs(self.diff_ecg)
         # integrate 
         self.int_ecg = self._mw_integrate(self.abs_ecg)
         
         # Construct buffers with last 8 values 
         self._initializeBuffers(self.int_ecg)

         # collect all unique local peaks in the integrated ecg
         peaks = self.peakDetect(self.int_ecg)

         # classify each peak as QRS or noise
         self.checkPeaks(peaks, self.int_ecg)


         # compensate for delay during integration
         self.QRSpeaks -= 40 * (self.samplingrate / 1000)
         
         return self.QRSpeaks
Code Example #19
File: DelaunayVoronoiDual.py Project: PMEAL/OpenPNM
    def add_boundary_pores(self, labels=['top', 'bottom', 'front', 'back',
                                         'left', 'right'], offset=None):
        r"""
        Add boundary pores to the specified faces of the network

        Pores are offset from the faces of the domain.

        Parameters
        ----------
        labels : string or list of strings
            The labels indicating the pores defining each face where boundary
            pores are to be added (e.g. 'left' or ['left', 'right'])

        offset : scalar or array_like
            The spacing of the network (e.g. [1, 1, 1]).  This must be given
            since it can be quite difficult to infer from the network,
            for instance if boundary pores have already been added to other faces.

        """
        offset = sp.array(offset)
        if offset.size == 1:
            offset = sp.ones(3)*offset
        for item in labels:
            Ps = self.pores(item)
            coords = sp.absolute(self['pore.coords'][Ps])
            axis = sp.count_nonzero(sp.diff(coords, axis=0), axis=0) == 0
            ax_off = sp.array(axis, dtype=int)*offset
            if sp.amin(coords) == sp.amin(coords[:, sp.where(axis)[0]]):
                ax_off = -1*ax_off
            topotools.add_boundary_pores(network=self, pores=Ps, offset=ax_off,
                                         apply_label=item + '_boundary')
Code Example #20
def detect_signals():
    vector, label = weeklydataset_sg_ndata(
        "/media/4AC0AB31C0AB21E5/Documents and Settings/Claudio/Documenti/Thesis/Workloads/MSClaudio/ews/access_log-20110805.csv",
        [],
    )
    x, target = aggregatebymins_sg_ndata(vector[1])

    starttime = time.time()
    y = array(target)
    t = array(x)
    thr = max(y) * 2 / 3
    print thr
    I = pylab.find(y > thr)
    #    print I
    #    pylab.plot(t,y, 'b',label='signal')
    #    pylab.plot(t[I], y[I],'ro',label='detections')
    #    pylab.plot([0, t[len(t)-1]], [thr,thr], 'g--')

    J = pylab.find(diff(I) > 1)
    argpeak = []
    targetpeak = []
    for K in split(I, J + 1):
        ytag = y[K]
        peak = pylab.find(ytag == max(ytag))
        #        pylab.plot(peak+K[0],ytag[peak],'sg',ms=7)
        argpeak.append(peak + K[0])
        targetpeak.append(ytag[peak])

    eta = time.time() - starttime
    print "time elapsed %f" % eta
    return list(itertools.chain(*argpeak)), list(itertools.chain(*targetpeak))
Code Example #21
File: utakata_time_freq.py Project: mackee/utakata
  def scanSound(self, source, minnotel):
    binarized = source
    scale = 60. / self.wavetempo * (binarized[0].size / self.duration)
    noise_length = scale*minnotel

    antinoised = sp.zeros_like(binarized)

    for i in range(sp.shape(binarized)[0]):
      new_line = binarized[i, :].copy()
      diffed = sp.diff(new_line)
      ones_keys = sp.where(diffed == 1)[0]
      minus_keys = sp.where(diffed == -1)[0]
      
      if(ones_keys.size != 0 and minus_keys.size != 0):
        if(ones_keys[0] > minus_keys[0]):
          new_line = self.cutNoise(
              (0, minus_keys[0]), noise_length, new_line)
          minus_keys = sp.delete(minus_keys, 0)

        if(ones_keys[-1] > minus_keys[-1]):
          new_line = self.cutNoise(
              (ones_keys[-1], new_line.size-1), noise_length, new_line)
          ones_keys = sp.delete(ones_keys, -1)

        for j in range(sp.size(ones_keys)):
          new_line = self.cutNoise(
              (ones_keys[j], minus_keys[j]), noise_length, new_line)

        antinoised[i, :] = new_line

    return antinoised
Code Example #22
 def execute(self):
     self.power_mat, self.thermal_expectation = self.full_calculation()
     n_chan = self.power_mat.shape[1]
     n_freq = self.power_mat.shape[0]
     # Calculate the mean channel correlations at low frequencies.
     low_f_mat = sp.mean(self.power_mat[1:4 * n_chan + 1,:,:], 0).real
     # Factorize it into principal components.
     e, v = linalg.eigh(low_f_mat)
     self.low_f_mode_values = e
     # Make sure the eigenvalues are sorted.
     if sp.any(sp.diff(e) < 0):
         raise RuntimeError("Eigenvalues not sorted.")
     self.low_f_modes = v
     # Now subtract out the noisiest channel modes and see what is left.
     n_modes_subtract = 10
     mode_subtracted_power_mat = sp.copy(self.power_mat.real)
     mode_subtracted_auto_power = sp.empty((n_modes_subtract, n_freq))
     for ii in range(n_modes_subtract):
         mode = v[:,-ii]
         amp = sp.sum(mode[:,None] * mode_subtracted_power_mat, 1)
         amp = sp.sum(amp * mode, 1)
         to_subtract = amp[:,None,None] * mode[:,None] * mode
         mode_subtracted_power_mat -= to_subtract
         auto_power = mode_subtracted_power_mat.view()
         auto_power.shape = (n_freq, n_chan**2)
         auto_power = auto_power[:,::n_chan + 1]
         mode_subtracted_auto_power[ii,:] = sp.mean(auto_power, -1)
     self.subtracted_auto_power = mode_subtracted_auto_power
Code Example #23
File: gr_filter_design.py Project: GREO/gnuradio-git
 def get_fft(self, fs, taps, Npts):
     Ts = 1.0/fs
     fftpts = fftpack.fft(taps, Npts)
     self.freq = scipy.arange(0, fs, 1.0/(Npts*Ts))        
     self.fftdB = 20.0*scipy.log10(abs(fftpts))
     self.fftDeg = scipy.unwrap(scipy.angle(fftpts))
     self.groupDelay = -scipy.diff(self.fftDeg)
Code Example #24
File: helpers.py Project: vilhelmp/adapy
def get_indices(arr, vals, disp=False):
    """

    Get the indices of all the elements between vals[0] and vals[1].
    Alternatively also between vals[2] and vals[3] if they are given.

    Input:
        arr  : the array in which to look for the elements
        vals : a list with either 2 or 4 values corresponding to the
               limits in between which to find the indices of the values

    Optional argument(s):
        disp : Boolean parameter; if True it displays the start and end
               index and the number of channels in between. Only works
               for value lists of length 2.

    Assumes the values in 'arr' is the mid values and that it is evenly
    spaced for all values.

    ********************** Important! **********************************
    The output indices are Python friendly, i.e. they are 0-based. Take care
    when using the indices in other software, e.g. GILDAS, MIRIAD, which
    are 1-based.

    --------------------------------------------------------------------

                            oOO Changelog OOo

    *2012/02
        Added more documentation, "important" notice about indexing
    *2011/07
        Removed +1 in the output indices to be compatible with rest of
        module, where Pythons 0-based indexing is used.
    *2010/12
        Doc written
    *2010/06
        Function created
    """

    from scipy import concatenate, where, array, diff

    dx = abs(0.5 * diff(arr)[0])
    if len(vals) == 4:
        v1, v2, v3, v4 = vals + array([-1, 1, -1, 1]) * dx
        # if the user wants two velocity areas to calculate noise
        low = where((arr >= v1) * (arr <= v2))[0]
        high = where((arr >= v3) * (arr <= v4))[0]
        channels = concatenate((low, high))
    elif len(vals) == 2:
        v1, v2 = vals + array([-1, 1]) * dx
        # channels = where((arr>=v1)*(arr<v2))[0]+1
        # this is because if +1 it is FITS/Fortran safe
        # changed: removed +1 for consistency in program
        channels = where((arr >= v1) * (arr <= v2))[0]
    #
    if disp and len(vals) == 2:
        first, last = channels.min(), channels.max()
        n = last - first + 1
        print "\nFirst: %d,\n Last: %d\n Nchan: %d\n" % (first, last, n)
    return channels
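
A usage sketch (hypothetical data, not from adapy): picking the channels of an evenly spaced velocity axis that fall between two limits, padded by half a channel width exactly as the function does:

import numpy as np

arr = np.linspace(-50.0, 50.0, 101)      # channel mid-values, spacing 1.0
dx = abs(0.5 * np.diff(arr)[0])          # half a channel width
v1, v2 = -10.0 - dx, 10.0 + dx
channels = np.where((arr >= v1) & (arr <= v2))[0]
assert arr[channels].min() == -10.0 and arr[channels].max() == 10.0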
Code Example #25
File: linalg.py Project: arunchaganty/spectral
def spectral_gap( x, k = None ):
    """Minimum difference in eigenvalues"""
    # Get the singular values
    s = svdvals( x )
    if k is not None:
        s = s[:k]

    return (sc.diff( s )).min() / s[0]
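
A quick check on a matrix with known singular values (illustrative only; note the function reports the most negative jump in the descending spectrum, relative to the largest value):

import numpy as np
from scipy.linalg import svdvals

x = np.diag([4.0, 3.0, 1.0])
s = svdvals(x)                        # descending: [4, 3, 1]
gap = np.diff(s).min() / s[0]
assert np.isclose(gap, -0.5)          # the 3 -> 1 drop: (1 - 3) / 4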
Code Example #26
File: hrf.py Project: Solvi/pyhrf
def getCanoHRF_tderivative(duration=25., dt=.5):

    hcano = getCanoHRF()
    hcano[1][-1] = 0.  # zero the last sample of the HRF values (hcano is (time axis, values))
    tAxis = np.arange(0, duration+dt, dt)
    hderiv = diff(hcano[1], n=1)/dt
    hderiv = np.hstack(([0], hderiv))
    return tAxis, hderiv
Code Example #27
File: radar_scope.py Project: ElOceanografo/PyRad
	def zero_heading_indices(self, threshold=3000):
		'''
		Returns the indices of the data blocks where the heading signal
		indicates that the antenna was at the zero point.
		'''
		heading = self.heading_buffer[:, 0]
		i1, i2 = sp.where(sp.diff(heading) > threshold)[0][0:2]
		return i1, i2
Code Example #28
File: linalg.py Project: arunchaganty/spectral
def eigen_sep( X, k = None ):
    """Minimum difference in eigenvalues"""
    # Get the eigenvalues
    s = (eigvals( X )).real
    if k is not None:
        s = s[:k]

    return min(abs(s).min(), abs(sc.diff( s )).min())
Code Example #29
 def envelopeFromArray(self, array):
     envelope=[]
     diffs=scipy.diff(array)
     #identifies envelope peaks via first derivative
     for index in range( 1,len(diffs) ):
         if diffs[index] <= 0 and diffs[index-1] >= 0:
             envelope.append(array[index])
     return envelope
Code Example #30
    def updateStates(self):
        peak_deltas = scipy.diff(self.peaks)
        trough_deltas = scipy.diff(self.troughs)
        
        peak_envelope = self.envelopeFromArray(self.peaks)
        trough_envelope = self.envelopeFromArray([-trough for trough in self.troughs])
        
        peak_envelope_deltas = scipy.diff(peak_envelope)
        trough_envelope_deltas = scipy.diff(trough_envelope)
        envelope_detected = (len(peak_envelope_deltas) > 0 and
                             len(trough_envelope_deltas) > 0)
        
        [unstable, limit_cycle, converging, converged] = [True, True, True, True]
        
        if len(self.peaks) > 1 and len(self.troughs) > 1:
            #only unstable if error always rises
            unstable = ((peak_deltas > 0.0).all() or 
                        (trough_deltas < 0.0).all() or
                        (envelope_detected and
                         (peak_envelope_deltas > 0.0).all() and
                         (trough_envelope_deltas > 0.0).all()))
            
            #only converging if error is always decreasing
            converging = (((peak_deltas < 0.0).all() and (trough_deltas > 0.0).all()) or
                          (envelope_detected and
                           (peak_envelope_deltas < 0.0).all() and 
                           (trough_envelope_deltas < 0.0).all())) 

            #in a limit cycle if error ever rises
            limit_cycle = not converging and not unstable
        else:
            [unstable, limit_cycle, converging] = [False, False, False]
            
        #only converged if both final peak and trough are below
        #converged threshold
        if len(self.peaks) > 0 and len(self.troughs) > 0:
            converged = ((self.peaks[-1] < self.convergence_level) and
                         (self.troughs[-1] > -self.convergence_level))
        else:
            converged = False
            
        self.flags["unstable"]    = unstable
        self.flags["limit_cycle"] = limit_cycle
        self.flags["converging"]  = converging
        self.flags["converged"]   = converged
Code Example #31
def compute_FFFS(x, y, xt, yt, param_grid_fffs):
    maxVar = param_grid_fffs['maxvar']
    clf = npfs.GMMFeaturesSelection()
    clf.learn_gmm(x, y)
    idx, crit, [] = clf.selection('forward',
                                  x,
                                  y,
                                  criterion='F1Mean',
                                  varNb=maxVar,
                                  nfold=5)
    d_crit = sp.diff(crit) / crit[:-1]
    nv = sp.where(d_crit < param_grid_fffs['threshold'])[0][0]
    print("Number of variables {}".format(nv))
    yp = clf.predict_gmm(xt, featIdx=idx[:nv])[0]
    return f1_score(yt, yp, average='weighted')
Code Example #32
File: ModelInputs.py Project: jswoboda/SimISR
def pyglowinput(
        latlonalt=[65.1367, -147.4472, 250.00],
        dn_list=[datetime(2015, 3, 21, 8, 00),
                 datetime(2015, 3, 21, 20, 00)],
        z=None):

    if z is None:
        z = sp.linspace(50., 1000., 200)
    dn_diff = sp.diff(dn_list)
    dn_diff_sec = dn_diff[-1].seconds
    timelist = sp.array([calendar.timegm(i.timetuple()) for i in dn_list])
    time_arr = sp.column_stack((timelist, sp.roll(timelist, -1)))
    time_arr[-1, -1] = time_arr[-1, 0] + dn_diff_sec

    v = []
    coords = sp.column_stack((sp.zeros((len(z), 2), dtype=z.dtype), z))
    all_spec = ['O+', 'NO+', 'O2+', 'H+', 'HE+']
    Param_List = sp.zeros((len(z), len(dn_list), len(all_spec), 2))
    for idn, dn in enumerate(dn_list):
        for iz, zcur in enumerate(z):
            latlonalt[2] = zcur
            pt = Point(dn, *latlonalt)
            pt.run_igrf()
            pt.run_msis()
            pt.run_iri()

            # The zonal wind pt.u and meridional wind pt.v will correspond to x and y even though they
            # are supposed to be east-west and north-south. Pyglow does not seem to have
            # vertical winds.
            v.append([pt.u, pt.v, 0])

            for is1, ispec in enumerate(all_spec):
                Param_List[iz, idn, is1, 0] = pt.ni[ispec] * 1e6

            Param_List[iz, idn, :, 1] = pt.Ti

            Param_List[iz, idn, -1, 0] = pt.ne * 1e6
            Param_List[iz, idn, -1, 1] = pt.Te
    Param_sum = Param_List[:, :, :, 0].sum(0).sum(0)
    spec_keep = Param_sum > 0.
    species = sp.array(all_spec)[spec_keep[:-1]].tolist()
    species.append('e-')
    Param_List[:, :] = Param_List[:, :, spec_keep]
    Iono_out = IonoContainer(coords,
                             Param_List,
                             times=time_arr,
                             species=species)
    return Iono_out
Code Example #33
def trainBursts(train, aBinSize=0.05, maxTime=1.0, alpha=0.95):
    """
    Find bursts of high activity (high instantaneous firing rate) in a train of pulses with respect to a statistical threshold derived from the percentile alpha. Example:
    bursts= trainBursts(train, aBinSize=0.05, maxTime=1.0, alpha=0.95)
    """
    nSpikes=len(train)
    isi= sc.zeros(nSpikes); 
    dIFR= sc.zeros(nSpikes)
    isi[1:]= train[1:]-train[:-1]
    ifr=1/isi
    dIFR[1:]= (((ifr[1:]-ifr[:-1])/isi[1:]) + ((ifr[1:]-ifr[:-1])/isi[:-1]))/2.0
    ifrCDF,ifrCDFInverse= calcCDF(ifr,graph=0)
    #isiCDF,isiCDFInverse= calcCDF(isi,graph=0)
    # Find spikes during high activity
    ifrUpThresh= calcThresholdsFromCDF(ifrCDFInverse, (alpha,))
    ifrDnThresh= calcThresholdsFromCDF(ifrCDFInverse, (1-alpha,))
    #isiThresh= calcThresholdsFromCDF(isiCDFInverse, (alpha,))
    #rHighInds= sc.where( ifr>ifrThresh)[0]
    rHighInds= sc.where( ifr>ifrUpThresh)[0]
    lHighInds= rHighInds-1
    highInds= sc.union1d(rHighInds,lHighInds)
    highSpikeTimes= train[highInds]
    #lowSpikeTimes= train[lowInds]
    aa= sc.zeros(len(highInds))
    aa[1:]= sc.diff(highInds)
    #bb= sc.zeros(len(lowInds))
    #bb[1:]= sc.diff(lowInds)
    startInds=sc.where(aa!=1)[0]
    burstStarts= highSpikeTimes[startInds]
    burstEnds= highSpikeTimes[startInds-1]
    nBursts= len(burstStarts)
    burstStarts=burstStarts[:-1]
    burstEnds=burstEnds[1:]    
    pBurst = sc.float32(nBursts)/nSpikes
    burstDurs = burstEnds-burstStarts
    c,b= xcorr(train,train, aBinSize, maxTime, minTime=0)
    cHz = c/aBinSize
    c[0]=0.0
    bursts={"train": train, 
        "highInds":highInds, "highSpikeTimes":highSpikeTimes, 
        "burstStarts":burstStarts, "burstEnds":burstEnds, 
        "nBursts":nBursts, "nSpikes": nSpikes, "pBurst": pBurst, "burstDurs": burstDurs,
        "alpha":alpha,
        "ifr":ifr, "ifrCDF":ifrCDF, "ifrCDFInverse":ifrCDFInverse,"ifrThresh":ifrUpThresh,
        "isi":isi, #"isiCDF":isiCDF, "isiCDFInverse":isiCDFInverse,"isiThresh":isiThresh,
        "dIFR":dIFR, 
        "aCorrHz":cHz, "aCorrBins":b}
    return bursts
Code Example #34
def test_optdiv3(beta=0.9, grid=scipy.arange(21.0), zDraws=scipy.array([-1.0]*25 + [1.0]*75), useValueIter=True):
	time1 = time.time()
	localvars = {}
	
	def postVIterCallbackFn(nIter, currentVArray, newVArray, optControls, stoppingResult):		
		global g_iterList
		(stoppingDecision, diff) = stoppingResult
		print("iter %d, diff %f" % (nIter, diff))
		localvars[0] = nIter		

	def postPIterCallbackFn(nIter, newVArray, currentPolicyArrayList, greedyPolicyList, stoppingResult):				
		(stoppingDecision, diff) = stoppingResult
		print("iter %d, diff %f" % (nIter, diff))
		localvars[0] = nIter		
		
	initialVArray = grid;								# initial guess for V: a linear fn
	initialPolicyArray = grid;							# initial guess for d: pay out everything
	params = OptDivParams3(grid, beta, zDraws);
	if (useValueIter == True):		
		result = bellman.grid_valueIteration([grid], initialVArray, params, postIterCallbackFn=postVIterCallbackFn, parallel=True)
		(nIter, currentVArray, newVArray, optControls) = result
	else:
		result = bellman.grid_policyIteration([grid], [initialPolicyArray], initialVArray, params, postIterCallbackFn=postPIterCallbackFn, parallel=False)
		(nIter, currentVArray, currentPolicyArrayList, greedyPolicyList) = result
		newVArray = currentVArray
		optControls = currentPolicyArrayList
	time2 = time.time()
	nIters = localvars[0]
	print("total time: %f, avg time: %f" % (time2-time1, (time2-time1)/nIters))
	
	optd_fn = linterp.LinInterp1D(grid, optControls[0])
	# plot V
	fig = plt.figure()
	ax = fig.add_subplot(111)
	ax.plot(grid, newVArray)
	dx = grid[1] - grid[0]
	deriv = scipy.diff(newVArray) / dx
	ax.plot(grid[:-1], deriv)
	ax.set_xlabel("M")
	ax.set_ylabel("V")		
	# plot optimal d
	fig = plt.figure()	
	ax = fig.add_subplot(111)
	ax.plot(grid, optControls[0])	
	ax.set_xlabel("M")
	ax.set_ylabel("optimal d")	
	plt.show()
	return result
Code Example #35
def get_CR(List, Repeat):

    if len(set(List)) == 1:  # NO CR if list has only identical elements
        return [0, 0]

    else:
        AllMeans = [
        ]  # List of model(!) sample means for every bootstrap iteration
        AllProbs = [
        ]  # List of model(!) sample posterior prob. for every iteration
        ProbOfMean = {}
        N = len(List)  # Number of data points

        for i in range(Repeat):  # Repeated bootstrap iterations
            Rands = [
                0
            ]  # Following Rubin et al. to get data probabilities from Dirichlet distrib.
            CurrAvg = 0
            for j in range(N - 1):
                Rands.append(random.random())
            Rands.append(1)
            Rands.sort()
            P = scipy.diff(
                Rands
            )  # List of random numbers that add to 1 and are used as data probabilities
            for j in range(N):
                CurrAvg += P[j] * List[j]  # Sample mean
            AllMeans.append(CurrAvg)
            AllProbs.append(1)  # Posterior weights are 1 (i.e. no weighting)

        for k in range(len(AllMeans)):
            ProbOfMean[AllMeans[k]] = AllProbs[k]
        AllMeans.sort()
        TotalProb = sum(AllProbs)
        CumulProb = 0
        perc_min = 0
        perc_max = 0
        for m in AllMeans:  # Iterating through sorted means, identifying that mean at which a certain percentile of probs is reached
            CumulProb += ProbOfMean[m]
            if (CumulProb > 0.025 * TotalProb) and (perc_min == 0):
                perc_min = m
            if (CumulProb > 0.975 * TotalProb) and (perc_max == 0):
                perc_max = m

    return [
        perc_min, perc_max
    ]  # Credibility Region is defined by min/max percentiles of sampling means
Code Example #36
        def weight_radial(catalogue, rwidth=rwidth, redges=redges):

            self.logger.info('Radial integral constraint.')

            distance = catalogue.distance()
            dmin, dmax = distance.min(), distance.max()
            self.logger.info('Comoving distances: {:.1f} - {:.1f}.'.format(
                dmin, dmax))

            if redges is not None:
                radialedges = scipy.array(redges)
                rwidth = scipy.mean(scipy.diff(radialedges))
                rmin, rmax = radialedges.min(), radialedges.max()
                if (rmin > dmin) or (rmax < dmax):
                    raise ValueError(
                        'Provided radial-edges ({:.1f} - {:.1f}) do not encompass the full survey ({:.1f} - {:.1f}).'
                        .format(rmin, rmax, dmin, dmax))
                self.logger.info(
                    'Provided radial-edges of width: {:.1f} and range: {:.1f} - {:.1f}.'
                    .format(rwidth, rmin, rmax))
                nbins = len(radialedges) - 1
            else:
                self.logger.info(
                    'Provided radial-width: {:.1f}.'.format(rwidth))
                nbins = scipy.rint((dmax - dmin) / rwidth).astype(int)
                radialedges = scipy.linspace(dmin, dmax + 1e-9, nbins + 1)

            self.logger.info(
                'There are {:d} radial-bins with an average of {:.1f} objects.'
                .format(nbins,
                        len(catalogue) * 1. / nbins))
            ibin = scipy.digitize(distance, radialedges, right=False) - 1

            for iaddbin in range(catalogue.attrs['naddbins']):
                mask = catalogue['iaddbin'] == iaddbin
                wcounts = scipy.bincount(ibin[mask],
                                         weights=catalogue['Weight'][mask],
                                         minlength=nbins)
                catalogue['Weight'][mask] /= wcounts[ibin[mask]]

            attrs = {'radialedges': radialedges, 'nbins': nbins}

            def bin(catalogue):
                return scipy.digitize(
                    catalogue.distance(), radialedges, right=False) - 1

            return attrs, bin
Code Example #37
	def combine(cls,ensembles,errors={},params=[]):
		assert (scipy.diff(map(len,ensembles)) == 0).all()
		self = ensembles[0].deepcopy()
		self.weights = {}
		for par in params:
			if errors.get(par,None) is None:
				errors[par] = [e.std(par) for e in ensembles]
			err = scipy.array(errors[par])
			corr = scipy.corrcoef([e[par] for e in ensembles])
			cov = corr * err[:,None].dot(err[None,:])
			invcov = scipy.linalg.inv(cov)
			self.weights[par] = scipy.sum(invcov,axis=-1)/scipy.sum(invcov)
			self.logger.info('Using for {} weights: {}.'.format(par,self.weights[par])) 
			for key in self._ARGS:
				tmp = getattr(self,key)
				tmp[par] = scipy.average([getattr(e,key)[par] for e in ensembles],weights=self.weights[par])
		return self
Code Example #38
    def compute(
        self,
        anaSigList,
        sign='-',
        left_sweep=0.001,
        right_sweep=0.002,
        baseline_time=.05,
        rise_time=.001,
        peak_time=.001,
        threshold=20.,
        window=.0001,
    ):
        anaSig = anaSigList[0]
        sr = anaSig.sampling_rate

        nb = int(baseline_time * sr)
        nb = int(nb / 2.) * 2 + 1
        nr = int(rise_time * sr)
        np = int(peak_time * sr)
        print peak_time, np, sr
        nw = int(window * sr)

        sigBase = convolve(anaSig.signal, ones(nb, dtype='f') / nb, 'same')
        #~ sigBase = signal.medfilt(anaSig.signal , kernel_size = nb)

        sigPeak = convolve(anaSig.signal, ones(np, dtype='f') / np, 'same')

        if sign == '-':
            aboves = sigBase[:-(nr + nb / 2 + np / 2
                                )] - sigPeak[nr + nb / 2 + np / 2:] > threshold
        elif sign == '+':
            aboves = sigPeak[nr + nb / 2 + np / 2:] - sigBase[:-(
                nr + nb / 2 + np / 2)] > threshold

        print aboves
        # detection when n point consecutive more than window
        aboves = aboves.astype('f')
        aboves[0] = 0
        aboves[-1] = 0
        indup, = where(diff(aboves) > .5)

        return indup + nr + nb / 2 + np / 2

        #~ inddown, = where( diff(aboves)<-.5)
        #~ print indup
        #~ print inddown
Code Example #39
File: scattering.py Project: beidongjiedeguang/guang
def frequency(p, m, theta_begin, alpha, dthe=3):
    Nu = np.zeros(len(alpha))
    flag_p = 0  # default: no p-order ray for the requested angle range
    if p == 1:
        flag_p = 1
        fName = '单角度数据//m={:.2f},p={:d},theta={:.2f}'.format(m, p, theta_begin)
        theta_max = def2sca(theta_def(90, p, m), p)
        # ************************************************************************************
        #         print('p={},m={:.2f},theta={},theta_max={:.2f}'.format(p,m,theta_begin,theta_max))
        # ************************************************************************************
        if theta_begin > theta_max:
            flag_p = 0
            # *********************************************************
    #             print('no p={}-order ray in this angle range'.format(p))
    # *********************************************************

    if p == 2:
        flag_p = 2
        fName = '单角度数据//m={:.2f},p={:d},theta={:.2f}'.format(m, p, theta_begin)
        theta_e = arccos(np.sqrt((m**2 - 1) / (p**2 - 1))) * 180 / pi
        theta_min = min(def2sca(theta_def(90, p, m), p),
                        def2sca(theta_def(theta_e, p, m), p))
        print('p={},m={:.2f},theta={},theta_min={:.2f}'.format(
            p, m, theta_begin, theta_min))
        if theta_begin < theta_min:
            flag_p = 0
            # *********************************************************
    #             print('no p={}-order ray in this angle range'.format(p))
    # *********************************************************

    if flag_p:
        for i_alpha in range(len(alpha)):
            pdiff = []

            # NOTE!!! To get the frequency by direct differentiation here, the scattering-angle interval must be exactly one degree!
            M = 2

            ths = np.linspace(theta_begin, theta_begin + dthe, M)
            for _ in ths:
                pdiff.append(phase_diff(_, alpha[i_alpha], 0, p, m))

            pdiff = np.array(pdiff).reshape(1, M)  # convert the list to an array
            pdd = abs(diff(pdiff))  # differentiate; take the slope's absolute value

            Nu[i_alpha] = pdd[0][0] / (2 * pi * dthe / (M - 1))
        return Nu, fName
Code Example #40
def Problem5Real():
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)
    
    m = 20
    v = 200
    f = discretelognorm(w,m,v)
    
    VEprime = sp.zeros((N,1))
    VUprime    = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    gamma = 0.1
    beta = 0.9
    
    tol = 10**-9
    delta1 = 1+tol
    delta2 = 1+tol
    it = 0
    while ((delta1 >= tol) or (delta2 >= tol)):
        it += 1
        VE = VEprime.copy()
        VU = VUprime.copy()
        EVU = EVUprime.copy()
        
        VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
        arg1 = sp.repeat(sp.transpose(VE),N,0)
        arg2 = sp.repeat(EVU,N,1)
        arg = sp.array([arg2,arg1])
        VUprime = alpha_util_grid + beta*sp.amax(arg,axis = 0)
        psi = sp.argmax(arg,axis = 0)
        EVUprime = sp.dot(VUprime,f)
    
        delta1 = sp.linalg.norm(VEprime - VE)
        delta2 = sp.linalg.norm(VUprime - VU)
        #print(delta1)
        
    wr_ind = sp.argmax(sp.diff(psi), axis = 1)
    wr = w[wr_ind]
    return wr
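
The reservation wage is read off as the point where the 0/1 policy psi flips from reject to accept, which sp.diff plus argmax makes a one-liner; a toy 1-D illustration:

import numpy as np

psi = np.array([0, 0, 0, 1, 1, 1])    # reject below, accept above
flip = np.argmax(np.diff(psi))        # index just before the switch
assert flip == 2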
Code Example #41
File: foreleg_touches.py Project: faymanns/Qbio_2018
    def load_frame_ROI(self, in_dir, lane):
        """
		Load the frame and ROI data. 
		
		Parameters
		----------
		
		lane: int
			lane number of walking arena; probably from 0 to 4.
		in_dir: str
			directory of exp data csv from DLC
		
		"""

        filename = os.path.join(in_dir, 'lane_%s_topbyroi.txt' % lane)
        with open(filename, 'r') as fp:
            self.frm_ROI = sp.loadtxt(fp)
        self.ROI_switch_idxs = sp.where(sp.diff(self.frm_ROI[:, 1]) != 0)[0]
Code Example #42
    def update_results(self):
        r'''
        Send results of the simulation out to the appropriate locations.

        This is a basic version of the update that simply sends out the main
        result (quantity). More elaborate updates should be subclassed.
        '''
        phase_quantity = self._quantity.replace(self._phase.name + '_', "")
        self._phase[phase_quantity] = self[self._quantity]
        dx = sp.squeeze(
            sp.diff(self[self._quantity][self._net.find_connected_pores(
                self.throats())],
                    n=1,
                    axis=1))
        g = self['throat.conductance']
        self._phase['throat.rate'] = sp.absolute(g * dx)
        self._logger.debug('Results of ' + self.name +
                           ' algorithm have been added to ' + self._phase.name)
Code Example #43
File: tfl.py Project: andyljones/flat-scraper-2
def get_edges(routes):
    results = []
    for timetable in walk_timetables(routes):
        origin = timetable['timetable']['departureStopId']
        for route in timetable['timetable']['routes']:
            for intervals in route['stationIntervals']:
                stops = [origin] + [x['stopId'] for x in intervals['intervals']]
                edges = [[s, t] for s, t in zip(stops, stops[1:])]
                
                times = [0] + [x['timeToArrival'] for x in intervals['intervals']]
                weights = list(sp.diff(sp.array(times)))
                
                results.extend([[s, t, w] for (s, t), w in zip(edges, weights)])
    
    results = pd.DataFrame(results, columns=['origin', 'destination', 'time'])
    results = results.groupby(['origin', 'destination']).mean()
    
    return results
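
The diff call converts the cumulative time-to-arrival values into per-leg travel times; a stripped-down sketch of that step (hypothetical stops):

import numpy as np

stops = ['A', 'B', 'C', 'D']
times = [0, 3, 7, 12]                          # cumulative minutes
edges = [[s, t] for s, t in zip(stops, stops[1:])]
weights = list(np.diff(np.array(times)))       # per-leg times
assert weights == [3, 4, 5]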
Code Example #44
def build_block_toeplitz_from_xcorrs(tf, chan_set, xcorrs, dtype=None):
    """builds a block toeplitz matrix from a set of channel xcorrs

    :type tf: int
    :param tf: desired lag in samples
    :type chan_set: list
    :param chan_set: list of channel ids to build the channel set from. the
        block covariance matrix will be build so that blocks are ordered from
        lower to higher channel id.
    :type xcorrs: XcorrStore
    :param xcorrs: XcorrStore object holding the xcorrs for various channel
        combinations
    :type dtype: dtype derivable
    :param dtype: will be passed to the constructor for the matrix returned.
        Default=None
    """

    # init and checks
    assert tf <= xcorrs._tf
    chan_set = sorted(chan_set)
    nc = len(chan_set)
    assert all(sp.diff(chan_set) >= 1)
    assert max(chan_set) < xcorrs._nc
    assert all([key in xcorrs for key in build_idx_set(chan_set)
                ]), 'no data for requested channels'
    rval = sp.empty((tf * nc, tf * nc), dtype=dtype)

    # build blocks and insert into fout
    for i in xrange(nc):
        m = chan_set[i]
        for j in xrange(i, nc):
            n = chan_set[j]
            xc = xcorrs[m, n]
            sample0 = xc.size / 2
            r = xc[sample0:sample0 + tf]
            c = xc[sample0 + 1 - tf:sample0 + 1][::-1]
            #c = xc[sample0:sample0 - tf:-1]
            block_ij = sp_la.toeplitz(c, r)
            rval[i * tf:(i + 1) * tf, j * tf:(j + 1) * tf] = block_ij
            if i != j:
                rval[j * tf:(j + 1) * tf, i * tf:(i + 1) * tf] = block_ij.T

    # return
    return rval
Code Example #45
def isi(t, V, spike_thresh=0):
    """isi_mean, isi_dev = isi(t, V, spike_thresh=0)

    Given voltage (V) and time (t) vectors, isi calculates the mean interspike
    interval (isi_mean) and the standard deviation of the interspike interval
    (isi_dev).

    You can optionally specify the spike threshold (defaults to 0).

    This uses an assumption that every time it spikes, the voltage increases
    above the given spike threshold.

    The method used here is not robust. If the data is noisy then there will
    likely be false positives. With a model however, it should work very
    well.
    """
    time = t[sp.logical_and(V[:-1] < spike_thresh, V[1:] >= spike_thresh)]
    dt = sp.diff(time)
    return sp.mean(dt), sp.std(dt)
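
A sketch with a synthetic trace (hypothetical data): three one-sample spikes 10 ms apart give a mean ISI of 10 ms and essentially zero deviation:

import numpy as np

t = np.arange(0.0, 0.05, 0.001)         # 50 ms sampled at 1 kHz
V = -65.0 * np.ones_like(t)
V[[10, 20, 30]] = 20.0                   # three spikes, 10 ms apart

crossings = t[np.logical_and(V[:-1] < 0, V[1:] >= 0)]
dt = np.diff(crossings)
assert np.allclose(dt, 0.01) and np.std(dt) < 1e-9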
Code Example #46
File: diff.py Project: kjnam/vtools
def time_diff(ts,order=1):
    """ Generate left side derative of a ts of input orders.

    Parameters
    -----------
    ts : :class:`~vtools.data.timeseries.TimeSeries`
        Series to differentiate. Must have one-dimensional data and be regular.
    
    order : int
        Order of the derivative.
    

    Returns
    -------
    result : vtools.data.time_series.TimeSeries
        A regular series with the derivative.
        
    Raises
    --------
    ValueError
        If input time series is shorter than order+1 or is not regular.
        
    """
    

    if len(ts)<order+1:
        raise ValueError("input timeseries is not "
                         "long enough.")
    if not ts.is_regular():
        raise ValueError("time_diff only supports regular "
                         "time series for the time being.")
    
    delta_data=diff(ts.data,order)
    rt_start=ts.times[order]
    interval=ts.interval
    
    prop=deepcopy(ts.props)
    prop[AGGREGATION]=INDIVIDUAL
    prop[TIMESTAMP]=INST
    
    rt=rts(delta_data,rt_start,interval,prop)
    return rt    
Code Example #47
 def test_statistical_physical_units(self):
     n_trials = 1000
     n_points = 200
     dt = 0.001
     window = sp.ones(n_points, dtype=float)
     power = sp.zeros(n_points // 2)
     for ii in range(n_trials):
         wave = self.amp1 * random.randn(n_points)
         power += npow.prune_power(npow.calculate_power(wave)).real
     power /= n_trials
     power = npow.make_power_physical_units(power, dt)
     freqs = npow.ps_freq_axis(dt, n_points)
     df = abs(sp.mean(sp.diff(freqs)))
     # The integral of the power spectrum should be the variance. The factor
     # of 2 accounts for the negative frequencies.
     integrated_power = sp.sum(power) * df * 2
     self.assertTrue(
         sp.allclose(integrated_power / self.amp1**2,
                     1.0,
                     atol=4.0 * (2.0 / sp.sqrt(n_trials * n_points))))
Code Example #48
    def rebin(self, xnew):
        """
        Rebin the spectrum on a new grid named xnew
        """

        #Does not need equal spaced bins, but why would you not?
        xnew.sort()

        fbin = sp.zeros(xnew.size)
        efbin = sp.zeros(xnew.size)

        #up sampling is just interpolation
        m = (self.wv >= xnew[0]) * (self.wv <= xnew[-1])
        if self.wv[m].size <= xnew.size - 1:
            fbin, efbin = self.interp(xnew)

        else:
            #down sampling--
            #1) define bins so that xnew is at the center.
            #2) interpolate to account for fractional pixel weights
            #3) take the mean within each bin
            db = 0.5 * sp.diff(xnew)
            b2 = xnew[1::] - db
            b2 = sp.insert(b2, 0, xnew[0])

            insert = sp.searchsorted(self.wv, b2)
            xinsert = sp.insert(self.wv, insert, xnew)
            xinsert = sp.unique(xinsert)
            yinsert, zinsert = self.interp(xinsert)

            i = sp.digitize(xinsert, b2)
            for j in range(b2.size):
                iuse = sp.where(i == j + 1)[0]
                fbin[j] = sp.mean(yinsert[iuse])
                efbin[j] = sp.mean(zinsert[iuse])

        self._wv = xnew
        if self.ef is not None:
            self._ef = efbin
        self.f = fbin
        assert self.wv.size == self.f.size
Code Example #49
File: cvlognet.py Project: thorlock12/glmnet_python
def auc(y, prob, w):
    if len(w) == 0:
        mindiff = scipy.amin(scipy.diff(scipy.unique(prob)))
        pert = scipy.random.uniform(0, mindiff / 3, prob.size)
        t, rprob = scipy.unique(prob + pert, return_inverse=True)
        n1 = scipy.sum(y, keepdims=True)
        n0 = y.shape[0] - n1
        u = scipy.sum(rprob[y == 1]) - n1 * (n1 + 1) / 2
        result = u / (n1 * n0)
    else:
        op = scipy.argsort(prob)
        y = y[op]
        w = w[op]
        cw = scipy.cumsum(w)
        w1 = w[y == 1]
        cw1 = scipy.cumsum(w1)
        wauc = scipy.sum(w1 * (cw[y == 1] - cw1))
        sumw = cw1[-1]
        sumw = sumw * (cw[-1] - sumw)
        result = wauc / sumw
    return (result)
Code Example #50
def setgrid(extent):
    dn = 1000
    width = extent[1] - extent[0]
    height = extent[3] - extent[2]
    Y = np.linspace(extent[2], extent[3], dn + 1)

    rsize = scipy.diff(Y)[0]
    Y = setXY(Y)
    Y = np.expand_dims(Y, axis=1)

    N_x = int(np.ceil(width / rsize))
    Y = np.repeat(Y, N_x, axis=1)

    X = np.zeros((N_x + 1, ))
    X[0] = extent[0]
    for i in range(N_x):
        X[i + 1] = X[i] + rsize
    X = setXY(X)
    X = np.expand_dims(X, axis=0)
    X = np.repeat(X, dn, axis=0)
    return X, Y, dn, N_x, rsize
Code Example #51
def rgb_hist(I, ax, bins=256):

    # run over red, green, and blue channels
    channels = ('r', 'g', 'b')
    for i, color in enumerate(channels):
        # get count pixel intensities for this channel
        counts, bins, patches = plt.hist(I[:, :, i].flatten(),
                                         bins=bins,
                                         normed=True,
                                         visible=False)

        # hack: choose mid-point of bins as centers
        centers = bins[:-1] + sp.diff(bins) / 2

        # line plot with fill
        plt.plot(centers, counts, color=color)
        ax.fill_between(centers, 0, counts, color=color, alpha=0.25)

    # hack for matlab's axes('square') function
    # http://www.mail-archive.com/[email protected]/msg08388.html
    plt.axis('tight')
    ax.set_aspect(1. / ax.get_data_ratio())
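
Two notes: plt.hist's normed keyword has been renamed density in current Matplotlib, and the mid-point "hack" works for uneven bins too, since it uses the local bin width. The center computation in isolation:

import numpy as np

counts, bins = np.histogram(np.random.default_rng(2).standard_normal(1000),
                            bins=32, density=True)
centers = bins[:-1] + np.diff(bins) / 2     # mid-point of each bin
assert centers.size == counts.size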
Code Example #52
def get_refperiod_violations(spike_trains, refperiod, progress=None):
    """ Return the refractory period violations in the given spike trains
    for the specified refractory period.

    :param dict spike_trains: Dictionary of lists of
        :class:`neo.core.SpikeTrain` objects.
    :param refperiod: The refractory period (time).
    :type refperiod: Quantity scalar
    :param progress: Set this parameter to report progress.
    :type progress: :class:`.progress_indicator.ProgressIndicator`
    :returns: Two values:

        * The total number of violations.
        * A dictionary (with the same indices as ``spike_trains``) of
          arrays with violation times (Quantity 1D with the same unit as
          ``refperiod``) for each spike train.
    :rtype: int, dict """
    if type(refperiod) != pq.Quantity or \
            refperiod.simplified.dimensionality != pq.s.dimensionality:
        raise ValueError('refperiod must be a time quantity!')

    if not progress:
        progress = ProgressIndicator()

    total_violations = 0
    violations = {}
    for u, tL in spike_trains.iteritems():
        violations[u] = []
        for i, t in enumerate(tL):
            st = t.copy()
            st.sort()
            isi = sp.diff(st)

            violations[u].append(st[isi < refperiod].rescale(refperiod.units))
            total_violations += len(violations[u][i])

            progress.step()

    return total_violations, violations
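
Stripped of the neo/quantities machinery, the core is just diff on the sorted spike times. Note that indexing st (length n) with the length n-1 mask, as above, relies on an old NumPy behavior that padded short boolean masks with False; slicing first keeps it valid on current NumPy (a numpy-only sketch):

import numpy as np

spikes = np.array([0.010, 0.015, 0.050, 0.0505, 0.100])   # seconds
refperiod = 0.002                                          # 2 ms

st = np.sort(spikes)
isi = np.diff(st)
violations = st[:-1][isi < refperiod]    # first spike of each violating pair
assert np.allclose(violations, [0.050])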
Code Example #53
	def get_ROI_splits(self):
		"""
		Get the frames corresponding to a beginning and end of an ROI.
		"""
		
		# The columns of ROI_splits are: ROI, beg idx, end idx, slot number
		self.ROI_splits = sp.zeros(4)
		
		for iS in range(self.num_slots):
			split_idxs = sp.nonzero(sp.diff(self.corr_ROI[:, iS]))[0]
			split_idxs = sp.hstack(([-1], split_idxs))
			if (self.num_frames - 1) not in split_idxs:
				split_idxs = sp.hstack((split_idxs, self.num_frames - 1))
			
			for iI in range(len(split_idxs) - 1):
				idx_beg = split_idxs[iI] + 1
				idx_end = split_idxs[iI + 1] + 1
				if idx_beg+1 >= self.num_frames:
				    continue
				arr = [self.corr_ROI[idx_beg + 1, iS], idx_beg, idx_end, iS]
				self.ROI_splits = sp.vstack((self.ROI_splits.T, arr)).T
		self.ROI_splits = self.ROI_splits.astype(int)
Code Example #54
def loadaerdat(datafile='/tmp/aerout.dat', stas=None, nEvents=None, datatype="II"):
	aerdatafh = open(datafile, 'rb')
	k = 0

	while aerdatafh.readline()[0] == "#":
		k += 1
		continue
	
	tmp = aerdatafh.read()
	n = len(tmp) / struct.calcsize('>' + datatype)
	tmad = struct.unpack_from('>' + datatype * n, tmp)
	dat = np.array(tmad)
	dat = dat.reshape(dat.shape[0] / 2, 2)
	
	if nEvents == None:
		nEvents = n		
	if stas == None:
		return dat
	else:
		tm = np.concatenate([[0], sp.diff(dat[:nEvents, 1])])
		ad = stas.STAddrPhysicalExtract(dat[:nEvents, 0]).transpose()
		return [tm, ad]
Code Example #55
    def __init__(self, ta_object: MDTrajectory,
                 box_size: tuple, grid_size: np.ndarray,
                 frag_id: int = 0, average_solute: bool = False):
        """ Create a pcf ta_object.

        Parameters
        ----------
        ta_object : MDTrajectory
            Object with all information about the MD trajectory
        box_size : tuple(3)
            Size of cubic grid where to evaluate the PCF
        grid_size : np.ndarray((Npoints,3), dtype=float)
            Number of points in each direction.
        frag_id : int
            Index indicating which molecule(s) to take as solute.
            By default solute = 0.
        average_solute : bool
            Whether the solute molecules also need to be averaged.

        """
        self.ta_object = ta_object
        histogram_range = np.asarray([-box_size/2., box_size/2.]).T
        self.ta_object.align_along_trajectory(frag_id, self.ta_object.topology, to_file=True)
        if average_solute:
            self.ta_object.get_average_structure(frag_id)
        # Align solvent and find the averaged structure
        edges, self.pcf = self.ta_object.compute_pair_correlation_function(histogram_range,
                                                                           grid_size, frag_id)
        self.grid_size = grid_size
        self.npoints = np.cumprod(grid_size)[-1]
        self.delta = sp.diff(edges)
        edges = np.array(edges)
        # NOTE: only works for cubic grids
        self.points = edges[:, :-1] + self.delta/2.0
        self.total_frames = self.ta_object.nframes
        # Initialize Pybind Class
        self.pbox = BoxGrid(grid_size, self.points)
        self.bohr = 0.529177249
Code Example #56
def _(ops, locs, n):
    '''
    Put operators in a circuit and compile them.

    note that the big end corresponds to the high loc bits!

    Args:
        ops (list): list of single bit operators.
        locs (list): list of positions.
        n (int): total number of bits.

    Returns:
        array: resulting matrix.
    '''
    if np.ndim(locs) == 0:
        locs = [locs]
    if not isinstance(ops, (list, tuple)):
        ops = [ops]
    locs = np.asarray(locs)
    locs = n - locs
    order = np.argsort(locs)
    locs = np.concatenate([[0], locs[order], [n + 1]])
    return _wrap_identity([ops[i] for i in order], np.diff(locs) - 1)
Code Example #57
    def to_window(self, **params):

        window = MuFunction(**params)

        window.k = self.k
        window.window = scipy.asarray([res.counts for res in self.result])

        with warnings.catch_warnings():
            warnings.simplefilter('ignore', category=RuntimeWarning)
            window.mu = scipy.ma.average([res.mu for res in self.result],
                                         weights=window.window,
                                         axis=0).data

        muedges = self.result[0].muedges
        mupositive = len(muedges) // 2
        muedges = muedges[mupositive:]
        assert muedges[0] == 0 and (muedges >= 0).all()
        window.mu = scipy.mean(
            [window.mu, -window.mu[::-1]],
            axis=0)[mupositive:]  #-1 because we took half of the shell
        window.window = scipy.sum([window.window, window.window[:, ::-1]],
                                  axis=0)[:, mupositive:]

        empty = scipy.isnan(window.mu)
        window.mu[empty] = edges_to_mid(muedges)[empty]
        window.error = scipy.sqrt(window.window)

        #print window.window.shape
        norm = scipy.sum(window.window * scipy.diff(muedges),
                         axis=-1) / (muedges[-1] - muedges[0])
        window.window /= norm[:, None]
        window.error /= norm[:, None]

        window.pad_zero()

        return window
Code Example #58
	def ROI_corrected(self):
		"""
		Correct ROI by incorporating minimum transition time
		"""
		
		for iS in range(self.num_slots):

			# Indices at which ROI changes
			splits = sp.nonzero(sp.diff(self.raw_ROI[:, iS]))[0]
			
			# For each of these transitions, check length for false transitions
			for iSplit in sp.arange(len(splits))[::-1]:
				
				# Frame-length of the current ROI
				split_beg = splits[iSplit - 1]
				split_end = splits[iSplit]
				split_len =  split_end - split_beg 
				
				# If ROI is too short, keep moving backward until you find an 
				# ROI of sufficient length (i.e. ROI > min_ROI_frames)
				split_shift = 0
				while split_len < self.min_ROI_frames:
					split_shift += 1
					
					# If at beginning of array; break the loop
					if iSplit - split_shift < 0: 
						break
					split_shift_beg = splits[iSplit - split_shift - 1]
					split_shift_end = splits[iSplit - split_shift]
					split_len = split_shift_end - split_shift_beg
				
				# Change all false regions back to ROI of sufficient length
				if split_shift > 0:
					frames_to_change = range(split_shift_end, split_end + 1)
					self.corr_ROI[frames_to_change, iS] = \
						self.raw_ROI[split_shift_end, iS]
Code Example #59
File: CubicDual.py Project: zmhhaha/OpenPNM
    def add_boundary_pores(
            self,
            labels=['top', 'bottom', 'front', 'back', 'left', 'right'],
            spacing=None):
        r"""
        Add boundary pores to the specified faces of the network

        Pores are offset from the faces by 1/2 of the given ``spacing``, such
        that they lie directly on the boundaries.

        Parameters
        ----------
        labels : string or list of strings
            The labels indicating the pores defining each face where boundary
            pores are to be added (e.g. 'left' or ['left', 'right'])

        spacing : scalar or array_like
            The spacing of the network (e.g. [1, 1, 1]).  This must be given
            since it can be quite difficult to infer from the network,
            for instance if boundary pores have already been added to other faces.

        """
        spacing = sp.array(spacing)
        if spacing.size == 1:
            spacing = sp.ones(3) * spacing
        for item in labels:
            Ps = self.pores(item)
            coords = sp.absolute(self['pore.coords'][Ps])
            axis = sp.count_nonzero(sp.diff(coords, axis=0), axis=0) == 0
            offset = sp.array(axis, dtype=int) * spacing / 2
            if sp.amin(coords) == sp.amin(coords[:, sp.where(axis)[0]]):
                offset = -1 * offset
            topotools.add_boundary_pores(network=self,
                                         pores=Ps,
                                         offset=offset,
                                         apply_label=item + '_boundary')
Code Example #60
    def to_window(self, **params):

        window = WindowFunction1D(**params)

        sedges = self.result.sedges
        window.poles = self.result.ells
        window.los = self.result.los
        if hasattr(self.result, 's'):
            window.s = self.result.s
            empty = scipy.isnan(window.s)
            window.s[empty] = edges_to_mid(sedges)[empty]
        else:
            window.s = edges_to_mid(sedges)

        window.window = self.result.counts
        volume = -scipy.diff(s_to_cos(sedges))
        window.window /= volume[None, ...]

        window.pad_zero()

        window.s = s_to_cos(window.s)[::-1]
        window.window[0] = window.window[0][::-1]

        return window