Example #1
    def zoom(self, event):
        newxlim = numpy.array(self.sp_iq.get_xlim())
        curxlim = numpy.array(self.xlim)
        if (newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
            self.xlim = newxlim
            #xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0] - self.position))))
            #xmax = min(int(ceil(self.sample_rate*(self.xlim[1] - self.position))), len(self.iq))
            xmin = max(0, int(ceil(self.sample_rate * (self.xlim[0]))))
            xmax = min(int(ceil(self.sample_rate * (self.xlim[1]))),
                       len(self.iq))

            iq = self.iq[xmin:xmax]
            time = self.time[xmin:xmax]

            iq_fft = self.dofft(iq)
            freq = self.calc_freq(time, self.sample_rate)

            self.plot_fft[0].set_data(freq, iq_fft)
            self.sp_fft.axis(
                [freq.min(),
                 freq.max(),
                 iq_fft.min() - 10,
                 iq_fft.max() + 10])

            draw()
Example #2
def hacerfft(channel):
    tamanho = len(channel)

    fdata = fft(channel)

    print('> FFT computed')

    # keep only the unique (single-sided) half of the spectrum
    nUniquePts = int(ceil((tamanho + 1) / 2.0))
    fdata = fdata[0:nUniquePts]
    fdata = abs(fdata)

    fdata = fdata / float(tamanho)
    fdata = fdata**2

    # double all bins except DC (and Nyquist for even-length input),
    # which have no mirrored counterpart
    if tamanho % 2 > 0:
        fdata[1:] = fdata[1:] * 2
    else:
        fdata[1:-1] = fdata[1:-1] * 2

    # fs (sampling rate) is assumed to be defined at module level
    freqArray = arange(0, nUniquePts, 1.0) * (fs / tamanho)
    plot(freqArray / 1000, 10 * log10(fdata))

    xlabel('Frequency (kHz)')
    ylabel('Power (dB)')
    plt.show()

    print('> FFT plotted')

    return fdata
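Since the snippet relies on module-level names (`fft`, `ceil`, `arange`, the plotting helpers, and the sampling rate `fs`), here is a minimal, hedged setup showing how it might be driven; the 440 Hz test tone and the value of `fs` are illustrative assumptions.

from numpy import arange, ceil, sin, pi, log10
from numpy.fft import fft
from matplotlib.pyplot import plot, xlabel, ylabel
import matplotlib.pyplot as plt

fs = 44100.0                        # assumed module-level sampling rate in Hz
t = arange(0, 0.1, 1 / fs)          # 100 ms of samples
channel = sin(2 * pi * 440 * t)     # 440 Hz test tone
spectrum = hacerfft(channel)        # plots the single-sided power spectrum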
Example #3
def hacerfft(channel, texto=None):
    fdata = fft(channel)

    print('\t> FFT computed')

    # leng (signal length) and fs (sampling rate) are module-level globals
    nUniquePts = int(ceil((leng + 1) / 2.0))
    fdata = fdata[0:nUniquePts]
    fdata = abs(fdata)

    fdata = fdata / float(leng)
    fdata = fdata**2

    # double all bins except DC (and Nyquist for even-length input)
    if leng % 2 > 0:
        fdata[1:] = fdata[1:] * 2
    else:
        fdata[1:-1] = fdata[1:-1] * 2

    freqArray = arange(0, nUniquePts, 1.0) * (fs / leng)
    plot(freqArray / 1000, 10 * log10(fdata))

    if texto is None:
        xlabel('Frequency (kHz)')
    else:
        xlabel('Frequency (kHz)  |  ' + str(texto))
    ylabel('Power (dB)')
    plt.show()

    print('\t> FFT plotted')

    return fdata, freqArray
Example #4
def hacerfft(channel, freq):  # freq in Hz
    fdata = fft(channel)

    print('> FFT computed: ' + str(freq))

    # N (signal length) and fs (sampling rate) are module-level globals
    nUniquePts = int(ceil((N + 1) / 2.0))
    fdata = fdata[0:nUniquePts]
    fdata = abs(fdata)

    fdata = fdata / float(N)
    fdata = fdata**2

    # double all bins except DC (and Nyquist for even-length input)
    if N % 2 > 0:
        fdata[1:] = fdata[1:] * 2
    else:
        fdata[1:-1] = fdata[1:-1] * 2

    freqArray = arange(0, nUniquePts, 1.0) * (fs / N)
    plot(freqArray, 10 * log10(fdata))

    xlabel('Frequency (Hz)')
    ylabel('Power (dB)')
    plt.show()

    print('> FFT plotted ' + str(freq))

    return fdata
Example #5
def printTrack(fid, resized, frame, pos, sz):
    p0 = [pos[0] - pylab.floor(sz[0] / 2), pos[1] - pylab.ceil(sz[1] / 2)]
    p1 = [pos[0] + pylab.floor(sz[0] / 2), pos[1] + pylab.ceil(sz[1] / 2)]

    if resized:
        p0 = [x * 2 for x in p0]
        p1 = [x * 2 for x in p1]

    fid.write(str(frame) + "," + str(p0[1]) + "," + str(p0[0]) + "," +
              str(p1[1]) + "," + str(p1[0]) + "\n")
Example #6
def generate_cord():
    """
    TODO: Pass the parameters as input arguments.

    Parameters:
      - Fs       : sampling frequency
      - F0       : frequency of the notes forming chord
      - gain     : gains of individual notes in the chord
      - duration : duration of the chord in second
      - alpha    : attenuation in KS algorithm
    """
    Fs = 48000

    # D2, D3, F3, G3, F4, A4, C5, G5
    F0 = 440 * pylab.array(
        [2**-(31.0/12), 2**-(19.0/12), 2**-(16.0/12), 2**(-14.0/12),
         2**-(4.0/12), 1.0, 2**(3.0/12), 2**(10.0/12)])
    gain = [1.2, 3.0, 1.0, 2.2, 1.0, 1.0, 1.0, 3.5]
    duration = 4.0
    alpha = 0.9785

    # Number of samples in the chord.
    nbsample_chord = int(Fs * duration)

    # This is used to correct alpha later, so that all the notes
    # decay together (with the same decay rate).
    first_duration = pylab.ceil(nbsample_chord / pylab.round_(Fs/F0[0]))

    # Initialization.
    chord = pylab.zeros(nbsample_chord)

    for i, f in enumerate(F0):
        print("Working on %g / %g" % (i+1, len(F0)))
        # Get M and duration parameter.
        current_M = pylab.round_(Fs/f)
        current_duration = pylab.ceil(nbsample_chord / current_M)

        # Correct current alpha so that all the notes decay together
        # (with the same decay rate)
        current_alpha = alpha ** (first_duration / current_duration)

        # Let Paul's high D on the bass ring a bit longer.
        if i == 1:
            current_alpha = current_alpha ** 0.8

        # Generate input and output of KS algorithm.
        x = pylab.rand(int(current_M))
        y = ks(x, current_alpha, int(current_duration))
        y = y[:int(nbsample_chord)]
        
        # Construct the chord by adding the generated note (with the
        # appropriate gain).
        chord = chord + gain[i] * y
        
    return Fs, duration, chord
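The snippet calls a `ks` helper that is not shown. A minimal sketch consistent with the call `ks(x, alpha, nperiods)` above might look like this; the simple per-period attenuation (and the absence of the usual Karplus-Strong low-pass averaging) are assumptions.

def ks(x, alpha, nperiods):
    # Repeat the length-M noise burst for nperiods periods,
    # scaling each successive period by alpha (assumed behaviour).
    M = len(x)
    y = pylab.zeros(nperiods * M)
    y[:M] = x
    for p in range(1, nperiods):
        y[p * M:(p + 1) * M] = alpha * y[(p - 1) * M:p * M]
    return y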
Example #7
def get_rejection(data, department):
    # print('department {0} data: {1}'.format(department, len(data)))
    data.sort()
    q1 = median([i[0] for i in data][:int(ceil(len(data) / 2))])
    q3 = median([i[0] for i in data][int(ceil(len(data) / 2)):])
    # left = q1 - 1.5 * (q3 - q1)
    left = 1
    right = q3 + 1.5 * (q3 - q1)
    rejection = set(i[1] for i in data if right < i[0] or i[0] < left)
    # print('bad amount = {0}'.format(len(rejection)))
    # print('bad percent = {0}'.format(100 * len(rejection) / len(data)))
    return rejection
Example #8
def circular_conv(x, h):
    P = len(h)
    n_ = int(ceil(log2(P)))
    h_ = np.concatenate((h, np.zeros(int(2**n_) - P)))
    P = len(h_)
    n1 = int(ceil(len(x) / 2**n_))
    x_ = np.concatenate((x, np.zeros(n1 * int(2**n_) - len(x))))
    y = np.zeros(len(x_) + len(h_) - 1)
    for i in range(n1):
        # Zero-pad each block to length 2P-1 so the FFT product gives a
        # linear (not circular) convolution of the block with h_ (overlap-add).
        temp = np.concatenate((x_[i * P:(i + 1) * P], np.zeros(P - 1)))
        h_pad = np.concatenate((h_, np.zeros(len(temp) - len(h_))))
        y[i * P:(i + 1) * P + P - 1] += np.fft.ifft(np.fft.fft(temp) * np.fft.fft(h_pad)).real
    return y
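One way to check the overlap-add logic is against numpy's direct linear convolution; this is a hedged usage sketch assuming `circular_conv` and its `ceil`/`log2` helpers are in scope.

import numpy as np
from numpy import ceil, log2

x = np.random.rand(1000)
h = np.random.rand(37)
y_blocks = circular_conv(x, h)
y_direct = np.convolve(x, h)
# circular_conv zero-pads x, so compare only the first len(y_direct) samples
print(np.allclose(y_blocks[:len(y_direct)], y_direct))   # True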
Example #9
    def inject(self, simulation, time, dtlast):
        #outer_wall = int((time-dtlast)/self.time_between_walls)+1
        #inner_wall = int(time/self.time_between_walls)
        outer_wall = int(ceil((time - dtlast) / self.time_between_walls))
        inner_wall = int(ceil(time / self.time_between_walls) - 1)
        inner_handled_wall = inner_wall + self.handled_walls
        s = ''
        for i in range(inner_handled_wall, inner_wall, -1):
            self.inject_wall(simulation, i, time,
                             self.particles_per_wall * (inner_handled_wall - i),
                             boundary=True)
        for i in range(inner_wall, outer_wall - 1, -1):
            npart = simulation.get_npart()
            self.inject_wall(simulation, i, time, npart, boundary=False)
Example #10
def plot_spike_histogram(spikes, bin=0.1, total_neurons=None):
    print 'Plotting activity histogram.'
    global figure_dir
    pylab.figure(get_free_figure_number())
    pylab.clf()
    pylab.hist([spike[0] for spike in spikes],
               bins=int(pylab.ceil(max([spike[0] for spike in spikes]) / bin)))
    if total_neurons is not None:
        pylab.ylim([0, total_neurons])
    pylab.xlim([0., pylab.ceil(max([spike[0] for spike in spikes]))])
    pylab.xlabel('Time, ms')
    pylab.ylabel('Amount of active neurons')
    pylab.title('Average network activity')
    pylab.savefig(os.path.join(figure_dir, 'activity.png'))
Example #11
def getAvgGreenTime(intergreen1, intergreen2):
    doc = libxml2.parseFile('tls.out')

    lNS = doc.xpathEval("count(/tls-states/tlsstate[@phase='0'])")
    lWE = doc.xpathEval("count(/tls-states/tlsstate[@phase='2'])")

    lIG1 = doc.xpathEval("count(/tls-states/tlsstate[@phase='1'])")
    lIG2 = doc.xpathEval("count(/tls-states/tlsstate[@phase='3'])")

    doc.freeDoc()

    greenNS = lNS / ceil((lIG1 / intergreen1))
    greenWE = lWE / ceil((lIG2 / intergreen2))

    return greenWE, greenNS
Example #12
    def imshow(self, shape, only=None):
        from pylab import subplot, imshow, cm, title, sqrt, ceil
        if only is None:
            only = list(range(len(self.weights)))

        L = len(only)
        c = int(ceil(sqrt(L)))
        r = int(ceil(L / float(c)))

        for i, idx in enumerate(only):
            w = self.weights[idx]
            w = w.reshape(shape)
            subplot(r, c, i + 1)
            imshow(w, cmap=cm.gray, interpolation='nearest')
            title('PC %d' % (idx))

Example #13
def old_spike_psth(data, t1_ms=-250., t2_ms=0., bin_ms=10):
    """Uses data format returned by get_spikes"""
    spike_time_ms = data['spike times ms']
    N_trials = data['trials']
    t2_ms = pylab.ceil((t2_ms - t1_ms) / bin_ms) * bin_ms + t1_ms
    N_bins = int((t2_ms - t1_ms) / bin_ms)

    if N_trials > 0:
        all_spikes_ms = pylab.array([], dtype=float)
        for trial in range(len(spike_time_ms)):
            if spike_time_ms[trial] is None:
                continue
            idx = pylab.find((spike_time_ms[trial] >= t1_ms)
                             & (spike_time_ms[trial] <= t2_ms))
            all_spikes_ms = \
              pylab.concatenate((all_spikes_ms, spike_time_ms[trial][idx]))
        spike_n_bin, bin_edges = \
          pylab.histogram(all_spikes_ms, bins = N_bins,
                          range = (t1_ms, t2_ms))

        spikes_per_trial_in_bin = spike_n_bin / float(N_trials)
        spike_rate = 1000 * spikes_per_trial_in_bin / bin_ms
    else:
        spike_rate = pylab.nan
        bin_edges = pylab.linspace(t1_ms, t2_ms, N_bins + 1)

    bin_center_ms = (bin_edges[1:] + bin_edges[:-1]) / 2.0

    return spike_rate, bin_center_ms
Example #14
def spike_psth(spike_time_ms, t1_ms=-50., t2_ms=250., bin_ms=1):
    """."""
    N_trials = len(spike_time_ms)
    t2_ms = pylab.ceil((t2_ms - t1_ms) / bin_ms) * bin_ms + t1_ms
    N_bins = int((t2_ms - t1_ms) / bin_ms)

    spike_count_by_trial = pylab.zeros((N_trials, N_bins), dtype=float)
    if N_trials > 0:
        all_spikes_ms = pylab.array([], dtype=float)
        for trial in range(len(spike_time_ms)):
            if spike_time_ms[trial] is None:
                continue
            idx = pylab.find((spike_time_ms[trial] >= t1_ms)
                             & (spike_time_ms[trial] <= t2_ms))
            spike_count_by_trial[trial,:], bin_edges = \
              pylab.histogram(spike_time_ms[trial][idx], bins = N_bins,
                              range = (t1_ms, t2_ms))

        spike_rate = 1000 * spike_count_by_trial.mean(axis=0) / bin_ms
    else:
        spike_rate = pylab.nan

    dummy, bin_edges = \
      pylab.histogram([], bins = N_bins, range = (t1_ms, t2_ms))
    bin_center_ms = (bin_edges[1:] + bin_edges[:-1]) / 2.0

    return spike_rate, spike_count_by_trial, bin_center_ms
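A hedged usage sketch; note the snippet leans on the legacy `pylab.find`, so this only runs against older matplotlib releases.

import pylab

# three trials of spike times in ms; None marks a dropped trial
spike_times = [pylab.array([12., 55., 120.]),
               None,
               pylab.array([14., 60., 61.])]
rate, counts, centers = spike_psth(spike_times, t1_ms=0., t2_ms=200., bin_ms=10)
print(counts.shape)   # (3, 20): one row of per-bin spike counts per trial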
Example #15
def plot_viz_of_stochs(vars, viz_func, figsize=(8, 6)):
    """ Plot autocorrelation for all stochs in a dict or dict of dicts
    
    :Parameters:
      - `vars` : dictionary
      - `viz_func` : visualization function such as ``acorr``, ``show_trace``, or ``hist``
      - `figsize` : tuple, size of figure
    
    """
    pl.figure(figsize=figsize)

    cells, stochs = tally_stochs(vars)

    # for each stoch, make an autocorrelation plot for each dimension
    rows = int(pl.floor(pl.sqrt(cells)))
    cols = int(pl.ceil(cells / float(rows)))

    tile = 1
    for s in sorted(stochs, key=lambda s: s.__name__):
        trace = s.trace()
        if len(trace.shape) == 1:
            trace = trace.reshape((len(trace), 1))
        for d in range(len(pl.atleast_1d(s.value))):
            pl.subplot(rows, cols, tile)
            viz_func(pl.atleast_2d(trace)[:, d])
            pl.title('\n\n%s[%d]' % (s.__name__, d),
                     va='top',
                     ha='center',
                     fontsize=8)
            tile += 1
Example #16
def panel():
    global Ras, Rms, Ie, Ies
    global tstart
    global pan, t0, t1

    #Pulse start time
    tstart = 0.1

    #Ras = [1.,500.,20.] #Ohm*cm
    #Rms = [200.,10000.,200.] #Ohm*cm^2

    Ras = [5.0, 500.0, 50.0]  #Ohm*cm
    Rms = [200.0, 10000.0, 1000.0]  #Ohm*cm^2

    Ie = 4.  #nA
    Ies = pl.c_[pl.ones_like(ns.t)]
    Ies[:int(pl.ceil(tstart / ns.dt))] = 0.

    t0 = 0.101
    t1 = 0.6

    pan = simulationpanel.SimulationPanel()
    pan.Move((640, 600))
    pan.setdict(globals())
    pan.addcommand('sim()')
    pan.addcommand('plottao()')
    pan.addvar('Ras')
    pan.addvar('Rms')
    pan.addvar('Ie')
    pan.addvar('t0')
    pan.addvar('t1')
Example #17
    def performFourierDataReturn(self, wav_name, input_location=None):
        if input_location is None:
            input_location = ''
        if wav_name.endswith('.wav'):
            wav_file_name = os.path.join(
                os.path.join(__location__, input_location), wav_name)
        else:
            wav_file_name = os.path.join(
                os.path.join(__location__, input_location), wav_name) + '.wav'
        sampFreq, snd = wavfile.read(wav_file_name)
        # convert 16-bit integer samples to floats between -1 and 1
        snd = snd / 2.0**15
        # get average of data if more than 1 channels
        if self.channels > 1:
            s1 = self.getAverageData(snd)
        else:
            s1 = snd.T[0]

        n = len(s1)
        p = fft(s1)

        nUniquePts = int(ceil((n + 1) / 2.0))
        p = p[0:nUniquePts]
        p = abs(p)

        p /= float(n)
        p **= 2

        if n % 2 > 0:
            p[1:len(p)] = p[1:len(p)] * 2
        else:
            p[1:len(p) - 1] = p[1:len(p) - 1] * 2

        return sampFreq, nUniquePts, n, p
Example #18
def plot_viz_of_stochs(vars, viz_func, figsize=(8,6)):
    """ Plot autocorrelation for all stochs in a dict or dict of dicts
    
    :Parameters:
      - `vars` : dictionary
      - `viz_func` : visualization function such as ``acorr``, ``show_trace``, or ``hist``
      - `figsize` : tuple, size of figure
    
    """
    pl.figure(figsize=figsize)

    cells, stochs = tally_stochs(vars)

    # for each stoch, make an autocorrelation plot for each dimension
    rows = int(pl.floor(pl.sqrt(cells)))
    cols = int(pl.ceil(cells/float(rows)))

    tile = 1
    for s in sorted(stochs, key=lambda s: s.__name__):
        trace = s.trace()
        if len(trace.shape) == 1:
            trace = trace.reshape((len(trace), 1))
        for d in range(len(pl.atleast_1d(s.value))):
            pl.subplot(rows, cols, tile)
            viz_func(pl.atleast_2d(trace)[:, d])
            pl.title('\n\n%s[%d]'%(s.__name__, d), va='top', ha='center', fontsize=8)
            tile += 1
Example #19
def stack_vectors(data, win=1, hop=1, zero_pad=True):
    """
    ::
       create an overlapping stacked vector sequence from a series of vectors
        data - row-wise multidimensional data to stack
        win  - number of consecutive vectors to stack [1]
        hop  - number of vectors to advance per stack [1]
        zero_pad - zero pad if incomplete stack at end
    """
    data = pylab.atleast_2d(data)
    nrows, dim = data.shape
    hop = min(hop, nrows)
    nvecs = nrows // int(hop) if not zero_pad else int(
        pylab.ceil(nrows / float(hop)))
    features = pylab.zeros((nvecs, win * dim))
    i = 0
    while i < nrows - win + 1:
        features[i // hop, :] = data[i:i + win, :].reshape(1, -1)
        i += hop
    if i // hop < nvecs:
        # zero-pad the final, incomplete stack
        x = data[i::, :].reshape(1, -1)
        features[i // hop, :] = pylab.c_[x,
                                         pylab.zeros(
                                             (1, win * dim - x.shape[1]))]
    return features
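A hedged usage sketch, assuming pylab is imported as the snippet expects:

import pylab

frames = pylab.arange(20.0).reshape(10, 2)    # ten 2-D vectors
stacked = stack_vectors(frames, win=3, hop=2)
print(stacked.shape)    # (5, 6): ceil(10/2) stacks of 3 consecutive vectors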
Example #20
def decodefft(finf, data, dropheights=False):
    #output: decoded data with the number of heights reduced
    #two variables are added to the finfo class:
    #deco_num_hei, deco_hrange
    #data must be arranged:
    #    (channels,heights,times) (C-style, profs change faster)
    #FFT over the acquired heights (axis=1, padded to length NSA), stored in data
    num_chan = data.shape[0]
    num_ipps = data.shape[2]
    num_codes = finf.subcode.shape[0]
    num_bauds = finf.subcode.shape[1]
    NSA = finf.num_hei + num_bauds - 1
    uppower = py.ceil(py.log2(NSA))
    extra = int(2**uppower - finf.num_hei)
    NSA = int(2**uppower)
    fft_code = py.fft(finf.subcode, n=NSA, axis=1).conj()
    data = py.fft(data, n=NSA,
                  axis=1)  #n= None: no cropped data or padded zeros
    for ch in range(num_chan):
        for ipp in range(num_ipps):
            code_i = ipp % num_codes
            data[ch, :, ipp] = data[ch, :, ipp] * fft_code[code_i, :]
    data = py.ifft(data, n=NSA, axis=1)  #inverse FFT back along the heights axis
    if dropheights:
        return data[:, :-extra - (num_bauds - 1), :]
    else:
        return data[:, :-extra, :]
Example #21
def old_spike_psth(data, t1_ms = -250., t2_ms = 0., bin_ms = 10):
  """Uses data format returned by get_spikes"""
  spike_time_ms = data['spike times ms']
  N_trials = data['trials']
  t2_ms = pylab.ceil((t2_ms - t1_ms) / bin_ms)*bin_ms + t1_ms
  N_bins = int((t2_ms - t1_ms) / bin_ms)
  
  if N_trials > 0:
    all_spikes_ms = pylab.array([],dtype=float)
    for trial in range(len(spike_time_ms)):
      if spike_time_ms[trial] is None:
        continue
      idx = pylab.find((spike_time_ms[trial] >= t1_ms) & 
                       (spike_time_ms[trial] <= t2_ms))
      all_spikes_ms = \
        pylab.concatenate((all_spikes_ms, spike_time_ms[trial][idx]))
    spike_n_bin, bin_edges = \
      pylab.histogram(all_spikes_ms, bins = N_bins, 
                      range = (t1_ms, t2_ms))

    spikes_per_trial_in_bin = spike_n_bin/float(N_trials) 
    spike_rate = 1000*spikes_per_trial_in_bin/bin_ms
  else:
    spike_rate = pylab.nan
    bin_edges = pylab.linspace(t1_ms, t2_ms, N_bins + 1)

  bin_center_ms = (bin_edges[1:] + bin_edges[:-1])/2.0

  return spike_rate, bin_center_ms
Example #22
def spike_psth(spike_time_ms, t1_ms = -50., t2_ms = 250., bin_ms = 1):
  """."""
  N_trials = len(spike_time_ms)
  t2_ms = pylab.ceil((t2_ms - t1_ms) / bin_ms)*bin_ms + t1_ms
  N_bins = int((t2_ms - t1_ms) / bin_ms)
  
  spike_count_by_trial = pylab.zeros((N_trials,N_bins),dtype=float)
  if N_trials > 0:
    all_spikes_ms = pylab.array([],dtype=float)
    for trial in range(len(spike_time_ms)):
      if spike_time_ms[trial] is None:
        continue
      idx = pylab.find((spike_time_ms[trial] >= t1_ms) & 
                       (spike_time_ms[trial] <= t2_ms))
      spike_count_by_trial[trial,:], bin_edges = \
        pylab.histogram(spike_time_ms[trial][idx], bins = N_bins, 
                        range = (t1_ms, t2_ms))
      
    spike_rate = 1000*spike_count_by_trial.mean(axis=0)/bin_ms
  else:
    spike_rate = pylab.nan

  dummy, bin_edges = \
    pylab.histogram([], bins = N_bins, range = (t1_ms, t2_ms))
  bin_center_ms = (bin_edges[1:] + bin_edges[:-1])/2.0

  return spike_rate, spike_count_by_trial, bin_center_ms
Example #23
    def NI_init_ctr(self, intTime=100):
        # intTime in milliseconds
        frequency = int(pl.ceil(1.0e7 / float(intTime)))
        self.trig = Trigger(0, frequency)
        self.trig.StartTask()
        self.ctr = InputCounter(self.trig.get_term(), 1)
        self.voltT = VoltOut('/Weetabix/ao0', self.trig.get_term())
Example #24
def decodefft(finf,data, dropheights = False):
    #output: decoded data with the number of heights reduced
    #two variables are added to the finfo class:
    #deco_num_hei, deco_hrange
    #data must be arranged: 
    #    (channels,heights,times) (C-style, profs change faster)
    #FFT over the acquired heights (axis=1, padded to length NSA), stored in data
    num_chan = data.shape[0]
    num_ipps = data.shape[2]
    num_codes = finf.subcode.shape[0]
    num_bauds = finf.subcode.shape[1]
    NSA = finf.num_hei + num_bauds - 1
    uppower = py.ceil(py.log2(NSA))
    extra = int(2**uppower - finf.num_hei)
    NSA = int(2**uppower)
    fft_code = py.fft(finf.subcode,n = NSA,axis=1).conj()
    data = py.fft(data,n=NSA,axis=1) #n= None: no cropped data or padded zeros
    for ch in range(num_chan):
        for ipp in range(num_ipps):
            code_i = ipp % num_codes
            data[ch,:,ipp] = data[ch,:,ipp] * fft_code[code_i,:]
    data=py.ifft(data,n=NSA,axis=1) #inverse FFT back along the heights axis
    if dropheights:
        return data[:,:-extra-(num_bauds-1),:]
    else:
        return data[:,:-extra,:]
Example #25
def displayData(X):
    print "Visualizing"
    m, n = X.shape
    width = int(round(sqrt(n)))
    height = width
    display_rows = int(floor(sqrt(m)))
    display_cols = int(ceil(m / float(display_rows)))

    print "Cell width:", width
    print "Cell height:", height    
    print "Display rows:", display_rows
    print "Display columns:", display_cols
        
    display = zeros((display_rows*height,display_cols*width))

    # Iterate through the training sets, reshape each one and populate
    # the display matrix with the letter matrixes.    
    for xrow in range(0, m):
        rowindex = divide(xrow, display_cols)
        columnindex = remainder(xrow, display_cols)
        rowstart = int(rowindex*height)
        rowend = int((rowindex+1)*height)
        colstart = int(columnindex*width)
        colend = int((columnindex+1)*width)
        display[rowstart:rowend, colstart:colend] = X[xrow,:].reshape(height,width).transpose()
         
    imshow(display, cmap=get_cmap('binary'), interpolation='none')
    
    # Show plot without blocking
    draw()    
Example #26
def gauss1(s, func):
    '''Construct a 1-D Gaussian mask'''
    # for a sufficient result use a mask of length ceil(6s) for a Gaussian filter
    # read: http://en.wikipedia.org/wiki/Gaussian_blur for more explanation
    s = float(s)
    r = int(ceil(3 * s))
    n = int(ceil(6 * s) + 1)

    # length-n zero vector of floats
    gaussFilter = zeros((n), dtype=float)

    # fill gaussFilter[] with Gaussian values centred on index r
    for x in range(n):
        gaussFilter[x] = func(s, x - r)

    return gaussFilter
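For completeness, a hedged usage sketch; the `gaussian` callback below is an assumed implementation of `func` (taking the scale s and a signed offset), and the imports mirror what gauss1 itself needs.

from math import exp, pi, sqrt
from numpy import ceil, zeros

def gaussian(s, x):
    # normalized 1-D Gaussian density (an assumed shape for func)
    return exp(-x**2 / (2 * s**2)) / (sqrt(2 * pi) * s)

mask = gauss1(2.0, gaussian)
print(len(mask), mask.sum())   # 13 taps, summing to roughly 1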
Example #27
def panel():
  global Ras,Rms,Ie,Ies
  global tstart
  global pan,t0,t1

  #Pulse start time
  tstart = 0.1

  #Ras = [1.,500.,20.] #Ohm*cm
  #Rms = [200.,10000.,200.] #Ohm*cm^2
  
  Ras = [5.0, 500.0, 50.0] #Ohm*cm
  Rms = [200.0, 10000.0, 1000.0] #Ohm*cm^2
  
  Ie = 4. #nA
  Ies = pl.c_[pl.ones_like(ns.t)]
  Ies[:int(pl.ceil(tstart/ns.dt))] = 0.

  t0 = 0.101
  t1 = 0.6

  pan = simulationpanel.SimulationPanel()
  pan.Move((640,600))
  pan.setdict(globals())
  pan.addcommand('sim()')
  pan.addcommand('plottao()')
  pan.addvar('Ras')
  pan.addvar('Rms')
  pan.addvar('Ie')
  pan.addvar('t0')
  pan.addvar('t1')
Example #28
def make_run_files(output_folder,
                   sequence_globals,
                   shots,
                   sequence_id,
                   shuffle=False):
    """Does what it says. sequence_globals and shots are of the datatypes
    returned by get_globals and get_shots, one is a nested dictionary with
    string values, and the other a flat dictionary. sequence_id should
    be some identifier unique to this sequence, use generate_sequence_id
    to follow convention. shuffle will randomise the order that the run
    files are generated in with respect to which element of shots they
    come from. This function returns a *generator*. The run files are
    not actually created until you loop over this generator (which gives
    you the filepaths). This is useful for not having to clean up as many
    unused files in the event of failed compilation of labscripts. If you
    want all the run files to be created at some point, simply convert
    the returned generator to a list. The filenames the run files are
    given are simply the sequence_id with increasing integers appended."""
    basename = os.path.join(output_folder, sequence_id)
    nruns = len(shots)
    ndigits = int(pylab.ceil(pylab.log10(nruns)))
    if shuffle:
        random.shuffle(shots)
    for i, shot_globals in enumerate(shots):
        runfilename = ('%s_%0' + str(ndigits) + 'd.h5') % (basename, i)
        make_single_run_file(runfilename, sequence_globals, shot_globals,
                             sequence_id, i, nruns)
        yield runfilename
Example #29
def latest_result_page(log_file_names, index=-1, output_name=None):
    dpi = 50
    if output_name is None:
        fig = figure(figsize=(38, 24), dpi=dpi)
    else:
        fig = Figure(figsize=(38, 24), dpi=dpi)

    reports = [
        extract_diff_failure_reports(log_file)
        for log_file in sorted(log_file_names)
    ]
    last_reports = [
        report[index] for report in reports
        if len(report) > (1 + abs(1 + 2 * index)) / 2
    ]
    columns = int(floor(sqrt(len(last_reports)) * 1.5))
    rows = int(ceil(len(last_reports) / float(columns)))

    for number, report in enumerate(last_reports):
        ax = fig.add_subplot(rows, columns, number + 1)
        report.plot(ax)
    fig.subplots_adjust(left=0.02,
                        right=0.98,
                        top=0.98,
                        bottom=0.02,
                        hspace=0.2)
    if output_name is not None:
        canvas = FigureCanvasAgg(fig)
        canvas.print_figure(output_name, dpi=dpi)
Example #30
    def show_blocking(self, plotfile=''):
        '''Print out the blocking data and show a graph of the behaviour of the standard error with block size.
        
If plotfile is given, then the graph is saved to the specified file rather than being shown on screen.'''

        # print blocking output
        # header...
        print '%-11s' % ('# of blocks'),
        fmt = '%-14s %-15s %-18s '
        header = ('mean (X_%i)', 'std.err. (X_%i)', 'std.err.err. (X_%i)')
        for data in self.data:
            data_header = tuple(x % (data.data_col) for x in header)
            print fmt % data_header,
        for key in self.covariance:
            str = 'cov(X_%s,X_%s)' % tuple(key.split(','))
            print '%-14s' % (str),
        for key in self.combination_stats:
            fmt = ['mean (X_%s'+self.combination+'X_%s)', 'std.err. (X_%s'+self.combination+'X_%s)']
            strs = tuple([s % tuple(key.split(',')) for s in fmt])
            print '%-16s %-18s' % strs,
        print
        # data
        block_fmt = '%-11i'
        fmt = '%-#14.12g %-#12.8e  %-#18.8e  '
        for s in range(len(self.data[0].stats)):
            print block_fmt % (self.data[0].stats[s].block_size),
            for data in self.data:
                print fmt % (data.stats[s].mean, data.stats[s].se, data.stats[s].se_error),
            for cov in self.covariance.itervalues():
                print '%+-#14.5e' % (cov[s]),
            for comb in self.combination_stats.itervalues():
                print '%-#16.12f %-#18.12e' % (comb[s].mean, comb[s].se),
            print

        # plot standard error 
        if PYLAB:
            # one sub plot per data set.
            nplots = len(self.data)
            for (i, data) in enumerate(self.data):
                pylab.subplot(nplots, 1, i+1)
                blocks = [stat.block_size for stat in data.stats]
                se = [stat.se for stat in data.stats]
                se_error = [stat.se_error for stat in data.stats]
                pylab.semilogx(blocks, se, 'g-', basex=2, label=r'$\sigma(X_{%s})$' % (data.data_col))
                pylab.errorbar(blocks, se, yerr=se_error, fmt=None, ecolor='g')
                xmax = 2**pylab.ceil(pylab.log2(blocks[0]+1))
                pylab.xlim(xmax, 1)
                pylab.ylabel('Standard error')
                pylab.legend(loc=2)
                if i != nplots - 1:
                    # Don't label x axis points.
                    ax = pylab.gca()
                    ax.set_xticklabels([])
            pylab.xlabel('# of blocks')
            if plotfile:
                pylab.savefig(plotfile)
            else:
                pylab.draw()
                pylab.show()
Example #31
def smooth(x,window_len=11,window='hanning'):
    """smooth the data using a window with requested size.
    
    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal 
    (with the window size) in both ends so that transient parts are minimized
    in the beginning and end parts of the output signal.

    Copied and modified from http://scipy-cookbook.readthedocs.io/items/SignalSmooth.html  (16/10/2017 - RProux)
    
    input:
        x: the input signal 
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            flat window will produce a moving average smoothing.

    output:
        the smoothed signal
        
    example:

    t=linspace(-2,2,41)
    x=sin(t)+randn(len(t))*0.1
    y=smooth(x)
    
    see also: 
    
    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
    scipy.signal.lfilter
 
    TODO: the window parameter could be the window itself if an array instead of a string
    NOTE: the window_len parameter should always be odd for reliable output (will shift slightly the data if even)
    """

    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")

    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")

    if window_len < 3:
        return x

    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")


    s = pl.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
    print(s)
    print(len(s))
    if window == 'flat': #moving average
        w = pl.ones(window_len, 'd')
    else:
        w = eval('pl.{}(window_len)'.format(window))

    y = pl.convolve(w / w.sum(), s, mode='valid')

    return y[int(pl.floor(window_len/2)) : -int(pl.ceil(window_len/2) - 1)]
Example #32
    def zoom(self, event):
        newxlim = numpy.array(self.sp_iq.get_xlim())
        curxlim = numpy.array(self.xlim)
        if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
            #xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position))))
            #xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq))
            xmin = max(0, int(ceil(self.sample_rate*(newxlim[0]))))
            xmax = min(int(ceil(self.sample_rate*(newxlim[1]))), len(self.iq))

            iq = numpy.array(self.iq[xmin : xmax])
            time = numpy.array(self.time[xmin : xmax])

            iq_psd, freq = self.dopsd(iq)

            self.draw_psd(freq, iq_psd)
            self.xlim = numpy.array(self.sp_iq.get_xlim())

            draw()
Example #33
    def plot(self,only=None):
        from pylab import plot,subplot,sqrt,ceil,title
    
        weights=self.weights_xh.T
        
        if only is None:
            only=list(range(len(weights)))
    
        L=len(only)
        c=int(ceil(sqrt(L)))
        r=int(ceil(L/float(c)))


        for i,idx in enumerate(only):
            w=weights[idx]
            subplot(r,c,i+1)
            plot(w,'-o')
            title('Filter %d' % (idx))
Example #34
def calculate_activity_histogram(spikes, total_neurons, bin=0.1):
    """Calculates histogram and bins specifically for neurons.

    Bins are provided in seconds instead of milliseconds."""
    hist, bin_edges = pylab.histogram(
        [spike[0] for spike in spikes],
        bins=int(pylab.ceil(max([spike[0] for spike in spikes]) / bin)))
    bin_edges = pylab.delete(bin_edges, len(bin_edges)-1) / 1000.
    return [[float(i)/total_neurons for i in hist], bin_edges]
Example #35
def create_band(band, res, plt=False, high=0, conv=False):
  '''
  print, "create_band, band, res, /bw, /plt, high=high, /conv"
  print, "This program will create a top hat band convolved with a gaussain of sigma = res with freq data spaced from 0 to 1000 (default) GHz"
  print, "band = bandpass region in GHz"
  print, "res = freq. spacing in GHz"
  print, "/plt plot bandpass"
  print, "high=high upper GHz region"
  print, "r = create_band([140, 160], 2.0, /bw, high=500.0)"
  return, 0
  '''

  npts = pl.ceil(4000.0)
  if high : npts = pl.ceil(high*1.0/.25)
  freq = pl.arange(npts)*.25 
  response = pl.zeros(len(freq))

  inb = pl.where((freq < band[1]) & (freq > band[0]))[0]
  if band[0] == band[1]:
    inb = [pl.absolute(freq - band[0]).argmin()]   # nearest frequency point
  if len(inb) == 0:
    print "Band not between 0-1000 GHZ"
    return 0

  response[inb] = 1

  #let's convolve the band with our resolution. 
  if conv:
    xx = .25*pl.arange(6*res/.25 + 1) - 3*res
    con = pl.exp(-xx**2/(2*res**2))/pl.sqrt(2*pl.pi)/res
    normalization = 1./sum(abs(con))
    response = pl.convolve(response, con, 'same')*normalization
  

  if plt:
    pl.figure()
    pl.plot(freq, response,'D')
    pl.xlabel('Freq(GHz)')
    pl.ylabel('Response')
    pl.xlim(band[0] - 3*res, band[1] + 3*res)


  result = {'Freq': freq, 'resp': response}

  return result
Example #36
def visualize_priors(priors):
    from matplotlib import pyplot as plt
    N = len(priors)
    m = int(round(sqrt(N * 3 / 4.)))
    n = int(ceil(N / float(m)))
    fig = plt.figure()
    for i, var in enumerate(priors):
        ax = fig.add_subplot(n, m, i + 1)
        ax = ax.hist(priors[var].rvs(1000), 20)
    plt.show()
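A hedged usage sketch, assuming each prior is a frozen scipy.stats distribution (anything exposing `.rvs(1000)`), and that `round`, `sqrt`, and `ceil` come from a pylab/numpy star import as the bare names suggest:

from numpy import round, sqrt, ceil
from scipy import stats

priors = {'mu': stats.norm(0, 1), 'sigma': stats.gamma(2.0)}
visualize_priors(priors)   # one histogram of 1000 draws per prior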
Example #37
def visualize_priors(priors):
    from matplotlib import pyplot as plt
    N = len(priors)
    m = int(round(sqrt(N*3/4.)))
    n = int(ceil(N/float(m)))
    fig = plt.figure()
    for i, var in enumerate(priors):
        ax = fig.add_subplot(n, m, i+1)
        ax = ax.hist(priors[var].rvs(1000), 20)
    plt.show()
Example #38
    def zoom(self, event):
        newxlim = numpy.array(self.sp_iq.get_xlim())
        curxlim = numpy.array(self.xlim)
        if (newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
            #xmin = max(0, int(ceil(self.sample_rate*(newxlim[0] - self.position))))
            #xmax = min(int(ceil(self.sample_rate*(newxlim[1] - self.position))), len(self.iq))
            xmin = max(0, int(ceil(self.sample_rate * (newxlim[0]))))
            xmax = min(int(ceil(self.sample_rate * (newxlim[1]))),
                       len(self.iq))

            iq = numpy.array(self.iq[xmin:xmax])
            time = numpy.array(self.time[xmin:xmax])

            iq_psd, freq = self.dopsd(iq)

            self.draw_psd(freq, iq_psd)
            self.xlim = numpy.array(self.sp_iq.get_xlim())

            draw()
Example #39
    def zeroPadd(self, fbins):
        # zero-pad the underlying tdData such that the frequency bins afterwards are fbins
        spac = 1 / self._tdData.dt / fbins
        actlen = self._tdData.getLength()
        nozeros = py.ceil(spac - actlen)
        self._tdData.zeroPaddData(nozeros)
        # leave the old bounds in place
        bnds = [min(self.getfreqs()), max(self.getfreqs())]
        zpd = self._calculatefdData(self._tdData)
        self.setFDData(self.getcroppedData(zpd, bnds[0], bnds[1]))
Example #40
def dispims_old(M, height, width, border=0, bordercolor=0.0):
    """ Display a whole stack (colunmwise) of vectorized matrices. Useful 
        eg. to display the weights of a neural network layer.
    """
    numimages = M.shape[1]
    n0 = int(pylab.ceil(numpy.sqrt(numimages)))
    n1 = int(pylab.ceil(numpy.sqrt(numimages)))
    im = bordercolor*\
         numpy.ones(((height+border)*n1+border,(width+border)*n0+border),dtype='<f8')
    for i in range(n0):
        for j in range(n1):
            if i * n1 + j < M.shape[1]:
                im[j*(height+border)+border:(j+1)*(height+border)+border,\
                   i*(width+border)+border:(i+1)*(width+border)+border] = \
                numpy.vstack((\
                  numpy.hstack((numpy.reshape(M[:,i*n1+j],(height, width)),\
                         bordercolor*numpy.ones((height,border),dtype=float))),\
                  bordercolor*numpy.ones((border,width+border),dtype=float)\
                  ))
    pylab.imshow(im, cmap=pylab.cm.gray)
Example #41
def save_2D_movie(frame_list, filename, frame_duration):
    """
    Saves a list of 2D numpy arrays of gray shades between 0 and 1 to a zipped tree of PNG files.
    
    Inputs:
        frame_list     - a list of 2D numpy arrays of floats between 0 and 1
        filename       - string specifying the filename where to save the data, has to end in '.zip'
        frame_duration - specifier for the duration per frame, will be stored as additional meta-data
        
    Example:
        >> import numpy
        >> framelist = []
        >> for i in range(100): framelist.append(numpy.random.random([100,100])) # creates a list of 2D numpy arrays with random values between 0. and 1.
        >> save_2D_movie(framelist, 'randommovie100x100x100.zip', 0.1)
    """
    try:
        import zipfile
    except ImportError:
        raise ImportError(
            "ERROR: Python module zipfile not found! Needed by NeuroTools.plotting.save_2D_movie(...)!"
        )
    try:
        import StringIO
    except ImportError:
        raise ImportError(
            "ERROR: Python module StringIO not found! Needed by NeuroTools.plotting.save_2D_movie(...)!"
        )
    assert PILIMAGEUSE, "ERROR: Since PIL has not been detected, the function NeuroTools.plotting.save_2D_movie(...) is not supported!"
    filenameConditionStr = "ERROR: Second argument of function NeuroTools.plotting.save_2D_movie(...) must be a string ending on \".zip\"!"
    assert (type(filename) == str) and (len(filename) > 4) and (
        filename[-4:].lower() == '.zip'), filenameConditionStr
    zf = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
    container = filename[:-4]  # remove .zip
    frame_name_format = "frame%s.%dd.png" % (
        "%", pylab.ceil(pylab.log10(len(frame_list))))
    for frame_num, frame in enumerate(frame_list):
        frame_data = [(p, p, p) for p in frame.flat]
        im = Image.new('RGB', frame.shape, 'white')
        im.putdata(frame_data)
        io = StringIO.StringIO()
        im.save(io, format='png')
        pngname = frame_name_format % frame_num
        arcname = "%s/%s" % (container, pngname)
        io.seek(0)
        zf.writestr(arcname, io.read())
        progress_bar(float(frame_num) / len(frame_list))

    # add 'parameters' and 'frames' files to the zip archive
    zf.writestr("%s/parameters" % container,
                'frame_duration = %s' % frame_duration)
    zf.writestr(
        "%s/frames" % container,
        '\n'.join(["frame%.3d.png" % i for i in range(len(frame_list))]))
    zf.close()
Example #42
    def zoom(self, event):
        newxlim = numpy.array(self.sp_iq.get_xlim())
        curxlim = numpy.array(self.xlim)
        if(newxlim[0] != curxlim[0] or newxlim[1] != curxlim[1]):
            self.xlim = newxlim
            #xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0] - self.position))))
            #xmax = min(int(ceil(self.sample_rate*(self.xlim[1] - self.position))), len(self.iq))
            xmin = max(0, int(ceil(self.sample_rate*(self.xlim[0]))))
            xmax = min(int(ceil(self.sample_rate*(self.xlim[1]))), len(self.iq))

            iq = self.iq[xmin : xmax]
            time = self.time[xmin : xmax]

            iq_fft = self.dofft(iq)
            freq = self.calc_freq(time, self.sample_rate)

            self.plot_fft[0].set_data(freq, iq_fft)
            self.sp_fft.axis([freq.min(), freq.max(),
                              iq_fft.min()-10, iq_fft.max()+10])

            draw()
Example #43
def show_images(images,which_images=None,max_images=None):
    from pylab import imshow,subplot,sqrt,ceil,title,cm,gca
    from random import shuffle
    
    if which_images is None:
        which_images=list(range(len(images.data)))
        
    if isinstance(which_images[0],str):  # target names
        which_names=which_images
        which_images=[]
        for idx in range(len(images.data)):
            name=images.target_names[images.targets[idx]]
            if name in which_names:
                which_images.append(idx)
        
    if max_images is not None:
        shuffle(which_images)
        which_images=which_images[:max_images]

    if not which_images:
        raise ValueError("No images selected")

    L=len(which_images)
    c=int(ceil(sqrt(L)))
    r=int(ceil(L/float(c)))

    for i,idx in enumerate(which_images):
        im=images.data[idx]
        name=images.target_names[images.targets[idx]]
        subplot(r,c,i+1)
        imshow(im,interpolation='nearest',cmap=cm.gray)
        title(name)
        
        if i<(L-c):
            gca().set_xticklabels([])
            
        if i%c!=0:
            gca().set_yticklabels([])
Example #44
def plot_spike_rastr(spikes):
    print 'Plotting raster plot.'
    global figure_dir
    pylab.figure(get_free_figure_number())
    pylab.clf()
    pylab.plot([spike[0] for spike in spikes],
               [spike[1] for spike in spikes], 'k.')
    pylab.xlabel('Time, ms')
    pylab.ylabel('# of neuron')
    pylab.title('Network raster activity')
    pylab.axis([0., pylab.ceil(max([spike[0] for spike in spikes])),
                -1, max([spike[1] for spike in spikes]) + 1])
    pylab.savefig(os.path.join(figure_dir, 'spikes.png'))
Example #45
    def val_change(self):
        self.v_i = int(pl.floor(self.start_v.value() * 1000))  # in mV
        self.v_f = int(pl.ceil(self.end_v.value() * 1000))  # in mV
        self.dt = int(self.time.value() * 1000)  # in ms
        if self.no_of_steps == self.steps.value():  # step_size was modified
            self.step_size = self.step_size_field.value()  # in mV
            self.no_of_steps = int(float(self.v_f - self.v_i) // self.step_size) + 1
            self.steps.setValue(self.no_of_steps)
        else:
            self.no_of_steps = self.steps.value()
            self.step_size = int(float(self.v_f - self.v_i) / (self.no_of_steps - 1))
            self.step_size_field.setValue(self.step_size)
        self.repeats = self.reps.value()
        self.counterDev = self.cb.currentIndex()
Example #46
def visualize ():
    sample_rate, snd = load_sample(".\\hh-closed\\dh9.WAV")
    print snd.dtype
    data = normalize(snd)
    print data.shape
    n = data.shape[0]
    length = float(n)
    print length / sample_rate, "s"
    timeArray = arange(0, length, 1)
    timeArray = timeArray / sample_rate
    timeArray = timeArray * 1000  #scale to milliseconds
    ion()
    if False:
        plot(timeArray, data, color='k')
        ylabel('Amplitude')
        xlabel('Time (ms)')
        raw_input("press enter")
        exit()
    p = fft(data) # take the fourier transform
    nUniquePts = int(ceil((n + 1) / 2.0))
    print nUniquePts
    p = p[0:nUniquePts]
    p = abs(p)
    p = p / float(n) # scale by the number of points so that
                 # the magnitude does not depend on the length
                 # of the signal or on its sampling frequency
    p = p**2  # square it to get the power

    # multiply by two (see technical document for details)
    # odd nfft excludes Nyquist point
    if n % 2 > 0: # we've got odd number of points fft
        p[1:len(p)] = p[1:len(p)] * 2
    else:
        p[1:len(p) -1] = p[1:len(p) - 1] * 2 # we've got even number of points fft

    print p
    freqArray = arange(0, nUniquePts, 1.0) * (sample_rate / n)
    plot(freqArray/1000, 10*log10(p), color='k')
    xlabel('Frequency (kHz)')
    ylabel('Power (dB)')
    raw_input("press enter")

    m = average(freqArray, weights = p)
    v = average((freqArray - m)**2, weights= p)
    r = sqrt(mean(data**2))
    s = var(data**2)
    print "mean freq", m #TODO: IMPORTANT: this is currently the mean *power*, not the mean freq.  What we want is mean freq weighted by power
    print "var freq", v
    print "rms", r
    print "squared variance", s
Example #47
def get_uniform_data(*args):

    # internally define variables
    dist_old = args[0]
    elev_old = args[1]
    func_class_old = args[2]
    wav_lst_old = args[3]
    nom_dist_window = args[4]

    window_cnt = int(mp.ceil(max(dist_old) / nom_dist_window))
    act_dist_window = max(dist_old) / window_cnt

    dist_new = mp.linspace(0.0, dist_old[-1], window_cnt + 1)
    elev_new = np.asarray([-1.0] * len(dist_new))
    func_class_new = np.zeros(len(dist_new)) - 1.0
    wav_lst_new = np.zeros(len(dist_new)) - 1.0

    for i in range(len(dist_new)):
        logical1 = dist_old >= (dist_new[i] - act_dist_window / 2.0)
        logical2 = dist_old <= (dist_new[i] + act_dist_window / 2.0)
        ind = mp.find(np.bitwise_and(logical1, logical2))
        if len(ind) != 0:
            y0 = elev_old[ind]
            elev_new[i] = mp.median(y0)
            func_class_mode, func_class_mode_cnt = stats.mode(
                func_class_old[ind])
            func_class_new[i] = np.copy(func_class_mode)
            wav_mode, wav_mode_cnt = stats.mode(wav_lst_old[ind])
            wav_lst_new[i] = np.copy(wav_mode)

    elev_new[0] = 1.0 * elev_old[0]
    elev_new[-1] = 1.0 * elev_old[-1]

    ind = mp.find(elev_new != -1.0)
    if len(ind) > 1:
        elev_new_func = interp1d(dist_new[ind], elev_new[ind], kind=1)
        elev_new = elev_new_func(dist_new)

    ind = mp.find(func_class_new != -1.0)
    if len(ind) > 1:
        fc_new_func = interp1d(dist_new[ind], func_class_new[ind], kind=0)
        func_class_new = fc_new_func(dist_new)

    ind = mp.find(wav_lst_new != -1.0)
    if len(ind) > 1:
        wav_new_func = interp1d(dist_new[ind], wav_lst_new[ind], kind=0)
        wav_lst_new = wav_new_func(dist_new)

    return dist_new, elev_new, func_class_new, wav_lst_new
Example #48
def map_eq(t, fs=250., eps=1e-8):
    """
    returns the longest array of time which has support points every 1 / fs ms
    that is not smaller than min[t] and does not exceed max[t]

    ==========
    Parameter:
    ==========
    t : *array*
        vector that contains a smallest and largest element.
    fs : *float*
        sampling rate of the new time vector
    eps : *float* (internally used!)
        

    ========
    Returns:
    ========
    teq : *array*
        an equally sampled array with a sampling rate of fs
    """
    t_min = ceil(min(t) * fs - eps)
    t_max = floor(max(t) * fs + eps)  # floor, so the grid does not exceed max(t)
    return 1. / fs * arange(t_min, t_max + 1)
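A brief usage sketch (assuming `ceil`, `floor`, and `arange` come from a numpy/pylab star import, as the bare names suggest):

from numpy import ceil, floor, arange

t = [0.013, 0.5, 1.207]
teq = map_eq(t, fs=250.)
print(teq[0], teq[-1])   # a 4 ms grid clipped inside [min(t), max(t)]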
Example #50
def analyse_bursts2_CUT(spikes, total_neurons, time_max):
    spikes_per_neuron = [[] for _ in xrange(total_neurons)]
    for spike in spikes:
        spikes_per_neuron[spike[1]].append(spike[0])

    # total_neurons = 1000
    analysing_neurons = sorted(random.sample(range(total_neurons), 1))
    print analysing_neurons

    # activity_hist_per_neuron = []
    # bins_per_neuron = []
    fft_per_neuron = []
    fft_sum = pylab.zeros(int(pylab.ceil(time_max*10.)/2 + 1))
    for i in analysing_neurons:
        a, b = calculate_activity_histogram_one_neu(
            spikes_per_neuron[i], pylab.ceil(time_max*10.))
        # activity_hist_per_neuron.append(a)
        # bins_per_neuron.append(b)
        # fft_per_neuron.append(abs(numpy.fft.rfft(a)))
        fft_sum += abs(numpy.fft.rfft(a))
        print 'Finished %f \r' % (float(i)/total_neurons),
    freqs_per_neuron = numpy.fft.rfftfreq(len(a), b[1] - b[0])
    print
    fft_sum /= len(analysing_neurons)

    # pylab.figure(10)
    # for i in analysing_neurons:
        # pylab.plot(bins_per_neuron[i], activity_hist_per_neuron[i])

    pylab.figure(11)
    # fft_sum = [sum([fft_per_neuron[neu][i]
                    # for neu in xrange(len(fft_per_neuron))])
               # for i in xrange(len(fft_per_neuron[0]))]
    # fft_sum[0] = fft_sum[1]
    fft_sum = smooth_data(fft_sum, 100)
    pylab.plot(freqs_per_neuron, fft_sum)
Example #51
def _fastFourier(series, sampRate):
    """ Perform a Fast Fourier Transform """
    n = len(series)
    p = sf.fft(series)  # Fast fourier transform
    uUniquePts = int(pl.ceil((n + 1) / 2.0))
    p = p[0:uUniquePts]
    p = abs(p)
    p = p / float(n)
    p = p**2
    if n % 2 > 0:
        p[1:len(p)] = p[1:len(p)] * 2
    else:
        p[1:len(p) - 1] = p[1:len(p) - 1] * 2
    freqArray = np.arange(0, uUniquePts, 1.0) * (sampRate / n)
    return (p, freqArray)
Example #52
def getFFT(s1, n):
    p = fft(s1)  # take the Fourier transform
    nUniquePts = int(ceil((n + 1) / 2.0))
    p = p[0:nUniquePts]
    p = abs(p)
    p = p / n  # scale by the number of points so that
    # the magnitude does not depend on the length
    # of the signal or on its sampling frequency
    p = p ** 2  # square it to get the power
    # multiply by two (see technical document for details);
    # an odd nfft excludes the Nyquist point
    if n % 2 > 0:  # odd number of points in the fft
        p[1:] = p[1:] * 2
    else:
        p[1:-1] = p[1:-1] * 2  # even number of points in the fft
    return nUniquePts, p
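A quick usage sketch, assuming `fft` and `ceil` are pylab imports as the bare names suggest: feed in a pure tone and check that the power peaks at the right frequency bin.

from pylab import arange, sin, pi, fft, ceil

fs = 1000.0                      # sampling rate in Hz
t = arange(0, 1, 1 / fs)         # one second of samples
s1 = sin(2 * pi * 50 * t)        # 50 Hz test tone
nUniquePts, p = getFFT(s1, len(s1))
freqs = arange(nUniquePts) * (fs / len(s1))
print(freqs[p.argmax()])         # ~50.0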
Example #53
def print_aligned2(w, r1, r2, n1=None, n2=None):
    if n1 is None or n2 is None:
        n1 = int(ceil(sqrt(w.shape[1])))
        n2 = n1
        

    assert(r1*r2==w.shape[0])

    Z = zeros(((r1+1)*n1, (r2+1)*n2), 'd')
    i1, i2 = 0, 0
    for i1 in range(n1):
        for i2 in range(n2):
            i = i1*n2+i2
            if i>=w.shape[1]: break
            Z[(r1+1)*i1:(r1+1)*(i1+1)-1, (r2+1)*i2:(r2+1)*(i2+1)-1] = w[:,i].reshape(r1,r2)
    return Z
Example #54
    def genIm(self, dlg, imb, mdh):
        pixelSize = dlg.getPixelSize()

        if not pylab.mod(pylab.log2(pixelSize/self.visFr.QTGoalPixelSize), 1) == 0:#recalculate QuadTree to get right pixel size
                self.visFr.QTGoalPixelSize = pixelSize
                self.visFr.Quads = None

        self.visFr.GenQuads()

        qtWidth = self.visFr.Quads.x1 - self.visFr.Quads.x0
        qtWidthPixels = int(pylab.ceil(qtWidth/pixelSize))

        im = pylab.zeros((qtWidthPixels, qtWidthPixels))
        QTrend.rendQTa(im, self.visFr.Quads)

        return im[int(imb.x0/pixelSize):int(imb.x1/pixelSize),
                  int(imb.y0/pixelSize):int(imb.y1/pixelSize)]
Example #55
def logBinInt(base,s,ps):
  # Bin the data in bins with edges base**1, base**2 ... 
  # all s such that base**i < x <= base**(i+1) go in bin i+1.
  # It is assumed that all s are strictly positive integers and
  # that s is sorted.

  # Check for sorted input
  for ind in range(1,len(s)):
    if s[ind] <= s[ind-1]:
      print "Unsorted input or repeated integers input. Returning."
      return [],[]

  # Construct a list of all edges of the bins
  maxPow = pylab.ceil(pylab.log(max(s))/pylab.log(base)).astype(int)
  maxEdge = pylab.floor(base**maxPow)

  # Generate edges of the bins
  binEdge = [] 
  lastEdge = pylab.floor(base)
  exp = 1.0
  while lastEdge < maxEdge:
    if pylab.floor(base**exp) > lastEdge:
      binEdge.append(lastEdge)
      lastEdge = pylab.floor(base**exp)
    exp = exp+1
  binEdge.append(maxEdge)
  binEdge = numpy.array(binEdge)

  # Calculate bin sizes and centers
  binSize = numpy.zeros(len(binEdge))
  binCenter = numpy.zeros(len(binEdge))
  binSize[0] = binEdge[0]
  binCenter[0] = (1.0 + binEdge[0])*0.5
  for j in range(1,len(binEdge)):
    binSize[j] = binEdge[j] - binEdge[j-1]
    binCenter[j] = (binEdge[j] + binEdge[j-1] + 1)*0.5

  # Calculate bin probability
  binProb = numpy.zeros(len(binEdge))
  binNum = 0
  for data, prob in zip(s,ps):
    while data > binEdge[binNum]:
      binNum = binNum+1
    binProb[binNum] = binProb[binNum] + prob 
  binProb = binProb/binSize

  return binCenter, binProb
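A hedged usage sketch (Python 2 era, matching the snippet, and assuming its `pylab`/`numpy` imports are in place): log-bin a normalized power-law distribution over sorted positive integers.

import numpy

s = numpy.arange(1, 1001)      # sorted, strictly positive integers
ps = 1.0 / s**2
ps = ps / ps.sum()             # normalized probabilities
centers, probs = logBinInt(2.0, s, ps)
# probs holds per-bin probability divided by bin width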
Example #56
def save_2D_movie(frame_list, filename, frame_duration):
    """
    Saves a list of 2D numpy arrays of gray shades between 0 and 1 to a zipped tree of PNG files.
    
    Inputs:
        frame_list     - a list of 2D numpy arrays of floats between 0 and 1
        filename       - string specifying the filename where to save the data, has to end in '.zip'
        frame_duration - specifier for the duration per frame, will be stored as additional meta-data
        
    Example:
        >> import numpy
        >> framelist = []
        >> for i in range(100): framelist.append(numpy.random.random([100,100])) # creates a list of 2D numpy arrays with random values between 0. and 1.
        >> save_2D_movie(framelist, 'randommovie100x100x100.zip', 0.1)
    """
    try:
        import zipfile
    except ImportError:
        raise ImportError("ERROR: Python module zipfile not found! Needed by neurotools.plotting.save_2D_movie(...)!")
    try:
        import StringIO
    except ImportError:
        raise ImportError("ERROR: Python module StringIO not found! Needed by neurotools.plotting.save_2D_movie(...)!")
    assert PILIMAGEUSE, "ERROR: Since PIL has not been detected, the function neurotools.plotting.save_2D_movie(...) is not supported!"
    filenameConditionStr = "ERROR: Second argument of function neurotools.plotting.save_2D_movie(...) must be a string ending on \".zip\"!"
    assert (type(filename) == str) and (len(filename) > 4) and (filename[-4:].lower() == '.zip'), filenameConditionStr
    zf = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
    container = filename[:-4] # remove .zip
    frame_name_format = "frame%s.%dd.png" % ("%", pylab.ceil(pylab.log10(len(frame_list))))
    for frame_num, frame in enumerate(frame_list):
        frame_data = [(p,p,p) for p in frame.flat]
        im = Image.new('RGB', frame.shape, 'white')
        im.putdata(frame_data)
        io = StringIO.StringIO()
        im.save(io, format='png')
        pngname = frame_name_format % frame_num
        arcname = "%s/%s" % (container, pngname)
        io.seek(0)
        zf.writestr(arcname, io.read())
        progress_bar(float(frame_num)/len(frame_list))

    # add 'parameters' and 'frames' files to the zip archive
    zf.writestr("%s/parameters" % container,
                'frame_duration = %s' % frame_duration)
    zf.writestr("%s/frames" % container,
                '\n'.join(["frame%.3d.png" % i for i in range(len(frame_list))]))
    zf.close()
Example #57
    def _win_mtx(self, x, w, h, nsamples=None, win=None):
        num_frames = int(pylab.ceil(x.size / float(h)))
        X = pylab.zeros((w, num_frames))
        if win is None:
            win = pylab.hamming(w)
        if nsamples is None:
            frames = range(num_frames)
        else:
            frames = sort(permutation(num_frames)[0:nsamples])
        for k in frames:
            start_pos = k * h
            end_pos = start_pos + w
            if x.size < end_pos:
                # zero-pad the final, incomplete frame
                X[:, k] = win * pylab.concatenate(
                    (x[start_pos:], pylab.zeros(w - (x.size - start_pos))))
            else:
                X[:, k] = win * x[start_pos:end_pos]
        return X.T