def gp_plot_prediction(predict_x, mean, variance = None):
    """
    Plot a gp's prediction using pylab including error bars if variance specified

    Error bars are 2 * standard_deviation as in GP for ML book
    """
    from pylab import plot, concatenate, fill
    import numpy as np  # used below; not imported in the original snippet
    if variance is not None:
        # check variances are just about +ve - could signify a bug if not
        #assert variance.all() > -1e-10
        data = [
            (x,y,max(v,0.0))
            for x,y,v
            in zip( predict_x, mean.flat, variance  )
            ]
    else:
        data = [
            (x,y)
            for x,y
            in zip( predict_x, mean )
            ]
    data.sort( key = lambda d: d[0] ) # sort on X axis
    predict_x = [ d[0] for d in data ]
    predict_y = np.array( [ d[1] for d in data ] )
    plot( predict_x, predict_y, color='k', linestyle=':' )
    if variance is not None:
        sd = np.sqrt( np.array( [ d[2] for d in data ] ) )
        var_x = concatenate((predict_x, predict_x[::-1]))
        var_y = concatenate((predict_y + 2.0 * sd, (predict_y - 2.0 * sd)[::-1]))
        p = fill(var_x, var_y, edgecolor='w', facecolor='#d3d3d3')
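
A minimal usage sketch (my addition, not part of the original example); the grid, mean and variance below are made up for illustration and assume numpy plus a working matplotlib backend:

import numpy as np
predict_x = np.linspace(0.0, 1.0, 50)
mean = np.sin(2 * np.pi * predict_x)
variance = 0.04 * np.ones_like(predict_x)   # constant made-up predictive variance
gp_plot_prediction(predict_x, mean, variance)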
Example #2
def homog2D(xPrime, x):
    """
    
    Compute the 3x3 homography matrix mapping a set of N 2D homogeneous 
    points (3xN) to another set (3xN)

    """

    numPoints = xPrime.shape[1]
    assert numPoints >= 4

    A = None
    for i in range(0, numPoints):
        xiPrime = xPrime[:, i]
        xi = x[:, i]

        Ai_row0 = pl.concatenate((pl.zeros(3), -xiPrime[2] * xi, xiPrime[1] * xi))
        Ai_row1 = pl.concatenate((xiPrime[2] * xi, pl.zeros(3), -xiPrime[0] * xi))
        Ai = pl.row_stack((Ai_row0, Ai_row1))

        if A is None:
            A = Ai
        else:
            A = pl.vstack((A, Ai))

    U, S, V = pl.svd(A)
    V = V.T
    h = V[:, -1]
    H = pl.reshape(h, (3, 3))
    return H
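
A hedged verification sketch (my addition, assuming pl is pylab): build four synthetic correspondences from a known homography and check that homog2D recovers it up to scale.

import pylab as pl
x = pl.array([[0., 1., 1., 0.],
              [0., 0., 1., 1.],
              [1., 1., 1., 1.]])            # unit-square corners, homogeneous
H_true = pl.array([[1.0, 0.2, 0.1],
                   [0.0, 1.0, 0.3],
                   [0.0, 0.0, 1.0]])
xPrime = H_true @ x
H = homog2D(xPrime, x)
H = H / H[2, 2]                             # remove the arbitrary scale
print(pl.allclose(H, H_true))               # expected: True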
Example #3
def homog3D(points2d, points3d):
    """
    
    Compute a matrix relating homogeneous 3D points (4xN) to homogeneous
    2D points (3xN)

    Not sure why anyone would do this.  Note that the returned transformation
    is *NOT* an isometry.  But it's here... so deal with it.

    """

    numPoints = points2d.shape[1]
    assert numPoints >= 6  # the 3x4 projection has 11 DOF; each point gives 2 equations

    A = None
    for i in range(0, numPoints):
        xiPrime = points2d[:, i]
        xi = points3d[:, i]

        Ai_row0 = pl.concatenate((pl.zeros(4), -xiPrime[2] * xi, xiPrime[1] * xi))
        Ai_row1 = pl.concatenate((xiPrime[2] * xi, pl.zeros(4), -xiPrime[0] * xi))
        Ai = pl.row_stack((Ai_row0, Ai_row1))

        if A is None:
            A = Ai
        else:
            A = pl.vstack((A, Ai))

    U, S, V = pl.svd(A)
    V = V.T
    h = V[:, -1]
    P = pl.reshape(h, (3, 4))
    return P
Example #4
def example():

    from pylab import rand, ones, concatenate
    import matplotlib.pyplot as plt
    # EXAMPLE data code from:
    # http://matplotlib.sourceforge.net/pyplots/boxplot_demo.py
    # fake up some data
    spread= rand(50) * 100
    center = ones(25) * 50
    flier_high = rand(10) * 100 + 100
    flier_low = rand(10) * -100
    data = concatenate((spread, center, flier_high, flier_low), 0)

    # fake up some more data
    spread= rand(50) * 100
    center = ones(25) * 40
    flier_high = rand(10) * 100 + 100
    flier_low = rand(10) * -100
    d2 = concatenate( (spread, center, flier_high, flier_low), 0 )
    data.shape = (-1, 1)
    d2.shape = (-1, 1)
    #data = [data, d2, d2[::2,0]]
    data = [data, d2]

    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    ax.set_xlim(0,4)
    percentile_box_plot(ax, data, [2,3])
    plt.show()
Example #5
def density_plot ( x, D ):
    """Plot the density D along with a confidence region"""
    # TODO: pass parameters through (e.g. color, axes, ...)
    fx = D(x)
    x_ = pl.concatenate ( (x, x[::-1]) )
    fx_ = pl.clip(pl.concatenate ( (fx+D.c,fx[::-1]-D.c) ), 0, pl.inf )
    pl.fill ( x_, fx_, edgecolor=[.5]*3, facecolor=[.8]*3 )
    pl.plot ( x, fx, color=[0]*3 )
Example #6
def drawBetween(x, yl, yh, col, lw, alpha = 1, plot = pylab.plot) :
  fx = pylab.concatenate( (x,x[::-1]) )
  fy = pylab.concatenate( (yh,yl[::-1]) )

  # probably does not work with log??
  p = pylab.fill(fx, fy, facecolor=col, lw = 0, alpha = alpha)
  if lw :
    plot(x, yl, x, yh, aa = 1, alpha = alpha, lw = lw, color='k')
Example #7
File: UVW.py  Project: nodarai/acdc
 def shadowing(self):
     "Select the shadowed antennas from the FLAG column and return the index of the shadowed measurement and the percentage of shadowing "
     
     indexFlag=pl.concatenate((pl.where(self.f==1)[0],pl.where(self.ff[0,0,])[0],pl.where(self.ff[1,0,])[0]))
     indexNoFlag=pl.concatenate((pl.where(self.f==0)[0],pl.where(self.ff[0,0,]==False)[0],pl.where(self.ff[1,0,]==False)[0]))
     
     Ntot=len(indexFlag)+len(indexNoFlag)
     fractionShadow=100.*len(indexFlag)/Ntot
     
     return(indexFlag,fractionShadow)
Example #8
 def int_peak(self,fitrange=None, intrange=None, normalize=False, plot=False, npoints=10):
     """
     Fits a linear background, subtracts the background, and integrates. Intended to be used for integrating peaks.
     
     wavelen : list
         list of wavelengths in nm. Can be sorted from low to high or high to low
     lum : list
         list of luminescence
     fitrange : 2-element list, optional
         Defaults to the span of the data. Input: [low nm, high nm]
     intrange : 2-element list, optional
         Defaults to the span of the data or fitrange (if given). Input: [low nm, high nm]
     normalize : boolean, optional
         Default is False
     plot : boolean, optional
         Default is False. Plots the original data, the linear background, and the data with the background subtracted
     npoints : int
         Default is 10. Number of points above and below the given fitrange point to average over.
     """
     if fitrange is None:
         fitindex=[0+npoints//2, len(self._wavelen)-1-npoints//2]  # integer division for slicing
     else:
         fitindex=[0, 0]
         fitindex[0]=py.where(self._wavelen>fitrange[0])[0][0]
         fitindex[1]=py.where(self._wavelen>fitrange[1])[0][0]
     
     wavelenfit=py.concatenate((self._wavelen[fitindex[0]-npoints//2:fitindex[0]+npoints//2], 
                            self._wavelen[fitindex[1]-npoints//2:fitindex[1]+npoints//2]))
     lumfit=py.concatenate((self._lum[fitindex[0]-npoints//2:fitindex[0]+npoints//2], 
                         self._lum[fitindex[1]-npoints//2:fitindex[1]+npoints//2]))
     linearfit = py.polyfit(wavelenfit, lumfit, 1)
     linear_bg = py.polyval( linearfit, self._wavelen[fitindex[0]:fitindex[1]+1] )
     wavelen_bg = self._wavelen[fitindex[0]:fitindex[1]+1].copy()
     lum_bg = self._lum[fitindex[0]:fitindex[1]+1].copy()
     lum_bg -= linear_bg
     
     if plot is True:
         py.plot(self._wavelen,self._lum,'k')
         py.plot(wavelen_bg,linear_bg,'k:')
         py.plot(wavelen_bg,lum_bg,'r')
         py.show()
     
     intindex=[0,0]
     if intrange is None:
         wavelen_int = wavelen_bg
         lum_int = lum_bg  
     else:
         intindex[0]=py.where(wavelen_bg>intrange[0])[0][0]
         intindex[1]=py.where(wavelen_bg>intrange[1])[0][0]    
         wavelen_int = wavelen_bg[intindex[0]:intindex[1]+1]
         lum_int = lum_bg[intindex[0]:intindex[1]+1]
     
     peak_area = py.trapz(lum_int, x=wavelen_int)
     return peak_area
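
For illustration (my addition, not from the original project), the same technique on a self-contained synthetic spectrum: fit a linear background from the edge windows, subtract it, and integrate the residual peak with the trapezoidal rule.

import numpy as np
wavelen = np.linspace(400.0, 700.0, 301)
lum = np.exp(-0.5 * ((wavelen - 550.0) / 10.0) ** 2) + 0.001 * wavelen
edges = np.concatenate((np.arange(0, 10), np.arange(291, 301)))  # background-only windows
bg = np.polyval(np.polyfit(wavelen[edges], lum[edges], 1), wavelen)
peak_area = np.trapz(lum - bg, x=wavelen)
print(peak_area)   # ~ 10 * sqrt(2*pi) = 25.07 for this synthetic Gaussian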
Example #9
def datagen(N):
    """
    Produces N pairs of training data and desired output;
    each sample of training data contains -1 in its first position,
    this corresponds to the interpretation of the threshold as first
    element of the weight vector
    """

    fun1 = lambda x1,x2: -2*x1**3-x2+.5*x1**2
    fun2 = lambda x1,x2: x1**2*x2+2*x1*x2+1
    fun3 = lambda x1,x2: .5*x1*x2**2+x2**2-2*x1**2
    
    rarr1 = rand(1,N)
    rarr2 = rand(1,N)
    
    teacher = sign(rand(1,N)-.5)
    
    idplus  = (teacher<0)
    idminus = ~idplus   # logical NOT; unary minus is not defined for boolean arrays
    
    rarr1[idplus] = rarr1[idplus]-1
    
    y1=fun1(rarr1,rarr2)
    y2=fun2(rarr1,rarr2)
    y3=fun3(rarr1,rarr2)
    
    x=transpose(concatenate((-ones((1,N)),y1,y2)))
    
    return x, teacher[0]
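
A hedged usage sketch (my addition); rand, sign, ones, transpose and concatenate are assumed to come from pylab's star import, as in the function body above.

x, teacher = datagen(100)
print(x.shape)        # (100, 3): the bias column of -1s plus two nonlinear features
print(teacher.shape)  # (100,)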
Example #10
def fixation_box_samples(all_x, all_y, fix_w, dwell_times, f_samp = 200.0):
  """Collect all x and ys for all trials for when the eye is within the fixation
  box."""
  n_trials = len(all_x)
  in_fix_box_x = pylab.array([],dtype=float)
  in_fix_box_y = pylab.array([],dtype=float)
  for tr in range(n_trials):
    if dwell_times[tr,0] >= 0:
      # We got a fixation
      start_idx = int(f_samp * dwell_times[tr,0]/1000.0)
      end_idx = -1
      if dwell_times[tr,1] >= 0:
        end_idx = int(f_samp * dwell_times[tr,1]/1000.0) - 5
      in_fix_box_x = pylab.concatenate((in_fix_box_x, all_x[tr][start_idx:end_idx]))
      in_fix_box_y = pylab.concatenate((in_fix_box_y, all_y[tr][start_idx:end_idx]))
  return in_fix_box_x, in_fix_box_y    
Example #11
 def set_pdf(self, x, p, Nrl = 1000):
   """Generate the lookup tables. 
   x is the value of the random variate
   pdf is its probability density
   cdf is the cumulative pdf
   inversecdf is the inverse look up table
   
   """
   
   self.x = x
   self.pdf = p/p.sum() #normalize it
   self.cdf = self.pdf.cumsum()
   self.inversecdfbins = Nrl
   self.Nrl = Nrl
   y = pylab.arange(Nrl)/float(Nrl)
   delta = 1.0/Nrl
   self.inversecdf = pylab.zeros(Nrl)    
   self.inversecdf[0] = self.x[0]
   cdf_idx = 0
    for n in range(1,self.inversecdfbins):
     while self.cdf[cdf_idx] < y[n] and cdf_idx < Nrl:
       cdf_idx += 1
     self.inversecdf[n] = self.x[cdf_idx-1] + (self.x[cdf_idx] - self.x[cdf_idx-1]) * (y[n] - self.cdf[cdf_idx-1])/(self.cdf[cdf_idx] - self.cdf[cdf_idx-1]) 
     if cdf_idx >= Nrl:
       break
   self.delta_inversecdf = pylab.concatenate((pylab.diff(self.inversecdf), [0]))
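
A hedged sketch (my addition) of how these tables are typically consumed to draw variates: index the inverse CDF with a uniform sample and interpolate linearly within the bin. The function name and the `tbl` argument are hypothetical.

import pylab
def sample_from_tables(tbl, N=1):
    """Draw N random variates from an object carrying the tables built above."""
    u = pylab.rand(N) * (1.0 - 1.0 / tbl.Nrl)   # keep the index inside the table
    idx = (u * tbl.Nrl).astype(int)
    frac = u * tbl.Nrl - idx
    return tbl.inversecdf[idx] + frac * tbl.delta_inversecdf[idx]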
Example #12
def decimate_trace( tr, newsps ):
    """
    Function to correctly apply some decimation filters

    """

    logging.debug('Now try to decimate the data')


    # Find samplerate
    tr.record = 0
    oldsps = tr.getv('samprate')[0]

    total = tr.record_count

    # no need. return
    if oldsps == newsps: return tr

    # Need to double our traces
    for rec in range(tr.record_count):
        tr.record = rec
        data = tr.trdata()
        zeros_data = zeros( len(data) )
        # a constant extension (ones_data scaled to data[0]) is prepared here,
        # but the zero padding below is what actually gets prepended
        ones_data = ones( len(data) )
        ones_data *= data[0]
        tr.trputdata( concatenate([zeros_data , data]) )

    # Bring pointer back
    tr.record = datascope.dbALL

    try:
        tr.trfilter('DECIMATE BY %i' % oldsps)
    except Exception as e:
        logging.error('decimate %s: %s' % (Exception,e))
Example #13
def old_spike_psth(data, t1_ms = -250., t2_ms = 0., bin_ms = 10):
  """Uses data format returned by get_spikes"""
  spike_time_ms = data['spike times ms']
  N_trials = data['trials']
  t2_ms = pylab.ceil((t2_ms - t1_ms) / bin_ms)*bin_ms + t1_ms
  N_bins = int((t2_ms - t1_ms) / bin_ms)
  
  if N_trials > 0:
    all_spikes_ms = pylab.array([],dtype=float)
    for trial in range(len(spike_time_ms)):
      if spike_time_ms[trial] is None:
        continue
      idx = pylab.find((spike_time_ms[trial] >= t1_ms) & 
                       (spike_time_ms[trial] <= t2_ms))
      all_spikes_ms = \
        pylab.concatenate((all_spikes_ms, spike_time_ms[trial][idx]))
    spike_n_bin, bin_edges = \
      pylab.histogram(all_spikes_ms, bins = N_bins, 
                      range = (t1_ms, t2_ms), new = True)

    spikes_per_trial_in_bin = spike_n_bin/float(N_trials) 
    spike_rate = 1000*spikes_per_trial_in_bin/bin_ms
  else:
    spike_rate = pylab.nan
    bin_edges = pylab.linspace(t1_ms, t2_ms, N_bins + 1)  # so bin centers are defined below
  
  bin_center_ms = (bin_edges[1:] + bin_edges[:-1])/2.0

  return spike_rate, bin_center_ms
Example #14
    def getCloneReplicates(self, clone, source, condition, applyFilter=False):
        '''Retrieve all growth curves for a clone+source+condition'''
        # Check if any other replicates should be returned
        # retArray is a 2xN multidimensional numpy array
        retArray = py.array([])
        first = True
        for i in range(1, self.numReplicates[clone] + 1):
            # Get replicate
            filterMe = self.dataHash[clone][i][source][condition]['filter']
            currCurve = self.dataHash[clone][i][source][condition]['od']

            # Check if filter is enabled and curve should be filtered
            if applyFilter and filterMe:
                continue

            # Create multidimensional array if first
            elif first:
                retArray = py.array([currCurve])
                first = False

            # Append to multidimensional array if not first
            else:
                retArray = py.concatenate((retArray,
                                           py.array([currCurve])))

        return retArray
Example #15
    def getCloneReplicates(self, clone, w, applyFilter=False):
        '''Retrieve all growth curves for a clone+well'''
        # Check if any other replicates should be returned
        # retArray is a 2xN multidimensional numpy array
        retArray = py.array([])
        first = True
        for rep in self.replicates[clone]:
            # Get replicate
            filterMe = self.dataHash[clone][rep][w]['filter']
            currCurve = self.dataHash[clone][rep][w]['od']

            # Check if filter is enabled and curve should be filtered
            if applyFilter and filterMe:
                continue

            # Create multidimensional array if first
            elif first:
                retArray = py.array([currCurve])
                first = False

            # Append to multidimensional array if not first
            else:
                retArray = py.concatenate((retArray,
                                           py.array([currCurve])))

        return retArray
Example #16
def px_smooth(idx, e, x, idx_table, N_HE0, N_US, N_US_HE, WC):
    """Over sample, smooth and undersample photoionization cross-sections
    """
    i, nmin, ntot, m, l, p, pos = idx_table[idx]

    try:
        # case of TOPBASE data
        nmin.index(".")
        nmin = pl.nan
    except ValueError:
        nmin = int(nmin)

    # Keep sampling for high energy values where the variation follows Kramers' law
    # (nmin is nan for TOPBASE data, in which case fall back to the default)
    if not pl.isnan(nmin):
        N_HE = int(ntot) - nmin
    else:
        N_HE = N_HE0

    if N_HE >= e.size:
        N_HE = -e.size
        print("Warning: N_HE is larger than photoionization table, select all the table.")

    e_sel = e[:-N_HE]
    e_sel_log = pl.log10(e_sel)
    x_sel = x[:-N_HE]

    # Interpolate and smooth data
    # e_i = pl.linspace(min(e_sel), max(e_sel), 10000)
    e_i_log = pl.linspace(min(e_sel_log), max(e_sel_log), 10000)
    e_i = 10 ** e_i_log
    x_i = pl.interp(e_i, e_sel, x_sel)
    x_is = smooth(x_i, WC)
    e_us = pl.concatenate([e_i[0:10], e_i[::N_US], e[int(ntot) - N_HE :: N_US_HE]])
    x_us = pl.concatenate([x_is[0:10], x_is[::N_US], x[int(ntot) - N_HE :: N_US_HE]])

    if not x_us.any():  # every undersampled cross-section is zero
        print("x_us = 0")
        quit(1)

    # Conservation of area
    # area = pl.trapz( x_Mb, e_eV)   # total
    # area = pl.trapz( e_sel, x_sel) # selected
    area_i = pl.trapz(x_i, e_i)  # selected interpolated
    area_is = pl.trapz(x_is, e_i)  # selected interpolated and sampled
    # area_us = pl.trapz(x_us, e_us)

    return e_us, x_us, area_i, area_is
Example #17
def eye_sample_insert_interval(R):
  tt = R.data['Trials']['eyeXData']['Trial Time']  
  n_trials = len(tt)
  d_esii = pylab.array([],dtype=float)
  for tr in range(n_trials):
    d_esii = pylab.concatenate((d_esii,pylab.diff(tt[tr])))

  return d_esii
Example #18
    def __fake_boxplot_data( self ):
        spread = pylab.rand(50) * 100
        center = pylab.ones(25) * 50
        flier_high = pylab.rand(10) * 100 + 100
        flier_low = pylab.rand(10) * -100
        data = pylab.concatenate( (spread, center, flier_high, flier_low), 0 )

        spread = pylab.rand(50) * 100
        center = pylab.ones(25) * 40
        flier_high = pylab.rand(10) * 100 + 100
        flier_low = pylab.rand(10) * -100
        d2 = pylab.concatenate( (spread, center, flier_high, flier_low), 0 )
        data.shape = (-1, 1)
        d2.shape = (-1, 1)
        data = [ data, d2, d2[::2,0] ]

        return data
Example #19
 def getmovingAveragedData(self,window_size_GHz=-1):
     #inelegant: convolve the columns one by one; could be improved
     if window_size_GHz<0.5e9:
         window_size=int(self.getEtalonSpacing()/self.getfbins())
     else:
         window_size=int(window_size_GHz/self.getfbins())
           
     window_size+=window_size%2+1
     window=py.ones(int(window_size))/float(window_size)
     
     dataabs=py.convolve(self.getFAbs(), window, 'valid')
     dataph=py.convolve(self.getFPh(), window, 'valid')
     one=py.ones((window_size-1)//2,)
     dataabs=py.concatenate((dataabs[0]*one,dataabs,dataabs[-1]*one))
     dataph=py.concatenate((dataph[0]*one,dataph,dataph[-1]*one))
     return py.column_stack((self.fdData[:,:3],dataabs,dataph,self.fdData[:,5:]))
Example #20
def concatenateRT(data, axis=0):
    if data.ndim != 2:
        return
    if axis == 1:
        datatmp = data.swapaxes(0,1)
    else:
        datatmp = data
    return tuple([P.concatenate(tuple(datatmp[i,:])) for i in range(datatmp.shape[0]) ])
Example #21
	def sigma_vectors_weights(self):

		"""
		generator for the sigma vectors' weights

		Returns
		----------
		Wm_i : ndarray
			array of sigma points' weights 
		Wc_i : ndarray
			array of sigma points' weights 
		"""
		Wm0=[self.lamda/(self.lamda+self.nx)]
		Wc0=[(self.lamda/(self.lamda+self.nx))+1-self.alpha_sigma_points**2+self.beta_sigma_points]
		Wmc=[1./(2*(self.nx+self.lamda))]
		Wm_i=pb.concatenate((Wm0,2*self.nx*Wmc)) #ndarray 2n_x+1
		Wc_i=pb.concatenate((Wc0,2*self.nx*Wmc)) #ndarray 2n_x+1
		return Wm_i,Wc_i
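
As a quick sanity check (my addition, with made-up nx and lamda), the mean weights should sum to one: lamda/(lamda+nx) + 2*nx * 1/(2*(nx+lamda)) = 1.

import pylab as pb
nx, lamda = 3, 0.5
Wm0 = [lamda / (lamda + nx)]
Wmc = [1.0 / (2 * (nx + lamda))]
print(pb.concatenate((Wm0, 2 * nx * Wmc)).sum())   # 1.0, up to float rounding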
Example #22
	def sigma_vectors_weights(self):

		"""
		generates  sigma vector weights

		Returns
		----------
		Wm_i : ndarray
			array of sigma points' weights 
		Wc_i : ndarray
			array of sigma points' weights 
		"""
		Wm0=[self.lamda/(self.lamda+self.L)]
		Wc0=[(self.lamda/(self.lamda+self.L))+1-self.alpha_sigma_points**2+self.beta_sigma_points]
		Wmc=[1./(2*(self.L+self.lamda))]
		Wm_i=pb.concatenate((Wm0,2*self.L*Wmc)) 
		Wc_i=pb.concatenate((Wc0,2*self.L*Wmc)) 
		return Wm_i,Wc_i
Example #23
def fixation_box_samples(all_x, all_y, fix_w, dwell_times, f_samp=200.0):
    """Collect all x and ys for all trials for when the eye is within the fixation
  box."""
    n_trials = len(all_x)
    in_fix_box_x = pylab.array([], dtype=float)
    in_fix_box_y = pylab.array([], dtype=float)
    for tr in range(n_trials):
        if dwell_times[tr, 0] >= 0:
            # We got a fixation
            start_idx = int(f_samp * dwell_times[tr, 0] / 1000.0)
            end_idx = -1
            if dwell_times[tr, 1] >= 0:
                end_idx = int(f_samp * dwell_times[tr, 1] / 1000.0) - 5
            in_fix_box_x = pylab.concatenate(
                (in_fix_box_x, all_x[tr][start_idx:end_idx]))
            in_fix_box_y = pylab.concatenate(
                (in_fix_box_y, all_y[tr][start_idx:end_idx]))
    return in_fix_box_x, in_fix_box_y
Example #24
def spikecv(timestamps,
            start_time=0,
            zero_times=0,
            end_time=None,
            window_len=.1):
    """Given the time stamps compute the coefficient of variation with a jumping window.
  Returns cv and rate as an array.
  Inputs:
    timestamps - the spike timestamps
    start_time - time rel to zero_time we end our windows (needs to be <= 0).
                 If zero, means no pre windows.
                 If None, means prewindows stretch to begining of data
                 The start_time is extended to include an integer number of windows
    zero_times  - reference time. Can be a zx1 array, in which case will give us an array of windows. If scalar
                 will only give one set of windows.
    end_time   - time rel to zero_time we end our windows (needs to >= 0)
                 If zero, means no post-windows
                 If None, means post-windows stretch to end of data
                 The end_time is extended to include an integer number of windows
    window_len - length of window to look at spikes (in same units as time stamps)

  Outputs:
    t  - time of the center of the window
    cv
    rate - in inverse units of timestamp
  """

    window_edges, windows, subwindows = window_spike_train(
        timestamps, start_time, zero_times, end_time, window_len=window_len)
    isi = pylab.diff(timestamps)
    if windows.shape[1]:
        windows[:, -1, 1] -= 1  #we have one less isi sample than timestamps

    t = pylab.zeros(windows.shape[1])
    cv = pylab.zeros(windows.shape[1])
    rate = pylab.zeros(windows.shape[1])

    for n in range(windows.shape[1]):
        collected_isi = pylab.array([])
        for m in range(windows.shape[0]):
            #CV computation
            collected_isi = pylab.concatenate(
                (collected_isi, isi[windows[m, n, 0]:windows[m, n, 1]]))

        if collected_isi.size > 0:
            mean = collected_isi.mean()
            std = collected_isi.std()
            cv[n] = std / mean
            rate[n] = 1. / mean
        else:
            cv[n] = 0
            rate[n] = 0

        #t[n] = window_len * (n + .5)
    t = (window_edges[1:] + window_edges[:-1]) / 2
    return t, cv, rate
Example #25
def my_prepADCcalib_CrsFn(ADCcalibFilePath, ADCcalibFilePrefix, NGroup):
    """ TS1.0 calibration: .h5 ADCcalib filepath,prefix => 4x numpy arrays (Crs/Fn, gain/offset)
    prefix is file name without _Coarse/Fine Gain/Offset Array.h5
    """
    #
    # load data from h5
    ADCcalibFile_CoarseGain = ADCcalibFilePath + ADCcalibFilePrefix + "_CoarseGainArray.h5"
    ADCcalibFile_CoarseOffset = ADCcalibFilePath + ADCcalibFilePrefix + "_CoarseOffsetArray.h5"
    ADCcalibFile_FineGain = ADCcalibFilePath + ADCcalibFilePrefix + "_FineGainArray.h5"
    ADCcalibFile_FineOffset = ADCcalibFilePath + ADCcalibFilePrefix + "_FineOffsetArray.h5"
    #
    my5hfile = h5py.File(ADCcalibFile_CoarseGain, "r")
    myh5dataset = my5hfile["/data/data/"]
    ADCcalib_CoarseGain_160x7Array = numpy.array(myh5dataset)
    my5hfile.close()  # read before closing; the original's bare ".close" never ran
    #
    my5hfile = h5py.File(ADCcalibFile_CoarseOffset, "r")
    myh5dataset = my5hfile["/data/data/"]
    ADCcalib_CoarseOffset_160x7Array = numpy.array(myh5dataset)
    my5hfile.close()
    #
    my5hfile = h5py.File(ADCcalibFile_FineGain, "r")
    myh5dataset = my5hfile["/data/data/"]
    ADCcalib_FineGain_160x7Array = numpy.array(myh5dataset)
    my5hfile.close()
    #
    my5hfile = h5py.File(ADCcalibFile_FineOffset, "r")
    myh5dataset = my5hfile["/data/data/"]
    ADCcalib_FineOffset_160x7Array = numpy.array(myh5dataset)
    my5hfile.close()
    #
    ADCcalibArr_CoarseGain = pylab.copy(ADCcalib_CoarseGain_160x7Array)
    ADCcalibArr_CoarseOffset = pylab.copy(ADCcalib_CoarseOffset_160x7Array)
    ADCcalibArr_FineGain = pylab.copy(ADCcalib_FineGain_160x7Array)
    ADCcalibArr_FineOffset = pylab.copy(ADCcalib_FineOffset_160x7Array)
    for iGroup in range(NGroup - 1):
        # -1 because there is already a copy of it
        ADCcalibArr_CoarseGain = pylab.concatenate((ADCcalibArr_CoarseGain, ADCcalib_CoarseGain_160x7Array))
        ADCcalibArr_CoarseOffset = pylab.concatenate((ADCcalibArr_CoarseOffset, ADCcalib_CoarseOffset_160x7Array))
        ADCcalibArr_FineGain = pylab.concatenate((ADCcalibArr_FineGain, ADCcalib_FineGain_160x7Array))
        ADCcalibArr_FineOffset = pylab.concatenate((ADCcalibArr_FineOffset, ADCcalib_FineOffset_160x7Array))
    #
    return ADCcalibArr_CoarseGain, ADCcalibArr_CoarseOffset, ADCcalibArr_FineGain, ADCcalibArr_FineOffset
Example #26
 def forward(self, xs):
     # print 'xs shape', xs.shape
     outputs = [net.forward(xs) for net in self.nets]
     # print 'out1', len(outputs)
     outputs = zip(*outputs)
     # print 'out2', len(outputs)
     # print 'out2', len(outputs), [(x.shape, y.shape) for x,y in outputs]
     outputs = [concatenate(l) for l in outputs]
     # print 'out3', len(outputs), [x.shape for x in outputs]
     # print outputs
     return outputs
Example #27
 def getDR(self):
     #returns the dynamic range,
     #estimated from the noise level of the FFT
     noiselevel=py.sqrt(py.mean(abs(py.fft(self._tdData.getAllPrecNoise()[0]))**2))
     #apply a moving average filter on log
     window_size=5
     window=py.ones(int(window_size))/float(window_size)
     hlog=py.convolve(20*py.log10(self.getFAbs()), window, 'valid')
     one=py.ones((2,))
     hlog=py.concatenate((hlog[0]*one,hlog,hlog[-1]*one))
     return hlog-20*py.log10(noiselevel)         
Example #28
 def computeMomentumEvolution(self):
     self.momentumPhase = py.exp(-1j * 2 * py.pi * py.diag(
         self.x) @ py.ones([len(self.x), len(self.k)]) @ py.diag(self.k))
     self.momentumEvolution = self.timeEvolution @ self.momentumPhase
     # normalisation of momentum
     self.momentumEvolution *= self.dx / py.sqrt(2 * py.pi)
     self.momentumDensity = abs(self.momentumEvolution)**2
     X = py.array(py.sum(self.momentumDensity, axis=1)**(-1))[:, py.newaxis]
     # normalisation of momentum density
     self.momentumDensity = self.momentumDensity * \
         py.concatenate(len(self.k) * [X], axis=1) / self.dk
Example #29
 def show_stuff(self, mf=True):
     from pylab import concatenate, show, print_aligned
     sample_fn = [Rsigmoid, sigmoid][mf]
     W  = self[0]
     V1 = self.V1
     H1 = sample_fn(W * V1)
     V2 = sample_fn(W.T() * H1)        
     
     V_io = concatenate([[x,y] for x,y in zip(V1, V2)])
     
     show(print_aligned(V_io.T))
Example #30
 def forward(self,xs):
     # print 'xs shape', xs.shape
     outputs = [net.forward(xs) for net in self.nets]
     # print 'out1', len(outputs)
     outputs = zip(*outputs)
     # print 'out2', len(outputs)
     # print 'out2', len(outputs), [(x.shape, y.shape) for x,y in outputs]
     outputs = [concatenate(l) for l in outputs]
     # print 'out3', len(outputs), [x.shape for x in outputs]
     # print outputs
     return outputs
Example #31
    def show_stuff(self, mf=True):
        from pylab import concatenate, show, print_aligned
        sample_fn = [Rsigmoid, sigmoid][mf]
        W = self[0]
        V1 = self.V1
        H1 = sample_fn(W * V1)
        V2 = sample_fn(W.T() * H1)

        V_io = concatenate([[x, y] for x, y in zip(V1, V2)])

        show(print_aligned(V_io.T))
Example #32
File: misc.py  Project: MMaus/mutils
 def mov_avg(data, tailLength):
     """
     returns the moving average for a 1D array data
     """
     data1 = concatenate((data[tailLength:0:-1],data,data[-tailLength:]))
     #print "mov avg idat shape:", data1.shape, "tailLength:", tailLength
     avgFilter = array([1./(2.*tailLength+1.),]*(2*tailLength + 1))
     #print "avgFilter:", avgFilter.shape
     res = convolve(data1,avgFilter)[2*tailLength:-2*tailLength]
     #print "mov avg shape:", res.shape
     return res
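
Hedged usage sketch (my addition); concatenate, array and convolve inside mov_avg are assumed to come from a numpy/pylab star import, as in the snippet above.

from numpy import arange
smoothed = mov_avg(arange(10.0), tailLength=2)
print(smoothed.shape)   # (10,) -- same length as the input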
Example #33
def plot_net_survival(db, country_list):
    import pylab as pl
    import settings

    pl.clf()
    ii = 0.0
    for k, p in sorted(db.items()):
        country = k.split("_")[2]  # TODO: refactor k.split into function
        if country not in country_list:
            continue
        pr = pl.sort(p.__getattribute__("Pr[net is lost]").gettrace())
        pr0 = pr[int(0.025 * len(pr))]
        pr1 = pr[int(0.975 * len(pr))]

        t = pl.arange(0, 5, 0.1)
        pct0 = 100.0 * pl.where(t < 3, (1 - pr0) ** t, 0.0)
        pct1 = 100.0 * pl.where(t < 3, (1 - pr1) ** t, 0.0)

        pl.fill(
            pl.concatenate((t, t[::-1])),
            pl.concatenate((pct0, pct1[::-1])),
            alpha=0.9,
            linewidth=3,
            facecolor="none",
            edgecolor=pl.cm.spectral(ii / len(country_list)),
            label=country,
        )
        pl.fill(
            pl.concatenate((t, t[::-1])),
            pl.concatenate((pct0, pct1[::-1])),
            alpha=0.5,
            linewidth=0,
            facecolor=pl.cm.spectral(ii / len(country_list)),
            zorder=-ii,
        )
        ii += 1.0
    pl.legend()
    pl.ylabel("Nets Remaining (%)")
    pl.xlabel("Time in household (years)")
    pl.title("LLIN Survival Curve Posteriors")
    pl.savefig(settings.PATH + "net_survival.png")
Example #34
    def SmoothAlignments(self):
        ali_file = self.AliFile
        f = open(ali_file, 'r')
        lines = f.readlines()
        f.close()

        self._lst_labels = []
        self.UtteranceIds = []
        self.RawFileList = []


        self.Utt2Index = {}
        label_dim = -1
        for line in lines:
            parts = line.rstrip('\n').split()
            utterance_id = parts[0]
            cur_labels = array([int(x) for x in parts[1:]])
            I = find(cur_labels % 3 ==0)
            I2 = find(I[1:] != (I[:-1]+1))
            starts = concatenate(([0], I[I2+1]))
            ends = concatenate((I[I2+1], [cur_labels.size]))
            smoothed_labels = zeros(cur_labels.size, 'int')
            for (s,e) in zip(starts, ends):
               a = array(linspace(s,e,4), 'int')
               smoothed_labels[a[0]:a[1]] = cur_labels[s]
               smoothed_labels[a[1]:a[2]] = cur_labels[s]+1
               smoothed_labels[a[2]:a[3]] = cur_labels[s]+2

            #self._lst_labels.append(array([int(x) for x in parts[1:]]))
            self._lst_labels.append(smoothed_labels)
            label_dim = max(label_dim, self._lst_labels[-1].max()+1)
            self.UtteranceIds.append(utterance_id)
            self.RawFileList.append(os.path.join(self.db_path,
                                          utterance_id + ".htk"))
            self.Utt2Index[utterance_id] = len(self.RawFileList)-1

        try:
            self.label_dim = max(label_dim, self.label_dim)
        except AttributeError:
            self.label_dim = label_dim
            print "No label_dim in file"
Example #35
 def __mul__(self, X):
     if not self.TR:  # do the usual thing
         a = 0
         b = self.w[0].v
         H = self.w[0] * X[:,a:b]
         for i in range(1, len(self)):
             a = b
             b += self.w[i].v
             H += self.w[i] * X[:,a:b]                
         return H
     else:
         return concatenate([x.transpose()*X for x in self.w], 1)
Example #36
def plotenvelopex(ip, t1, ap1, sele, eele, t2, ap2, sele2, eele2):
    """
  plotenvelope("ip5",t1,ap1,"mqy_4l5_b1","mqy_4r5_b1",t2,ap2,"mqy_4l5_b2","mqy_4r5_b2")
  """
    #select
    yip5 = (ap1.co[t1._row_ref[ip], 0] + ap2.co[t2._row_ref[ip], 0]) / 2
    xip5 = (ap1.co[t1._row_ref[ip], 2] + ap2.co[t2._row_ref[ip], 2]) / 2
    idxs1 = t1._row_ref[sele]
    idxe1 = t1._row_ref[eele]
    idxs2 = t2._row_ref[sele2]
    idxe2 = t2._row_ref[eele2]
    # start plot
    _p.hold(True)
    _p.title("Horizontal beam envelope")
    _p.xlabel(r"$z [\rm{m}]$")
    _p.ylabel(r"$x [\rm{m}]$")
    _p.grid(True)
    # closed orbit
    x1 = ap1.co[idxs1:idxe1, 2] - xip5
    y1 = ap1.co[idxs1:idxe1, 0] - yip5
    _p.plot(y1, x1, color=[0, 0, 1])
    x2 = ap2.co[idxs2:idxe2, 2] - xip5
    y2 = ap2.co[idxs2:idxe2, 0] - yip5
    _p.plot(y2, x2, color=[1, 0, 0])
    # beam1
    x1 = ap1.xp[idxs1:idxe1, 2] - xip5
    y1 = ap1.xp[idxs1:idxe1, 0] - yip5
    x2 = ap1.xm[idxs1:idxe1, 2] - xip5
    y2 = ap1.xm[idxs1:idxe1, 0] - yip5
    x = _p.concatenate((x1, x2[::-1]))
    y = _p.concatenate((y1, y2[::-1]))
    _p.fill(y, x, facecolor='b', alpha=0.2)
    # beam2
    x1 = ap2.xp[idxs2:idxe2, 2] - xip5
    y1 = ap2.xp[idxs2:idxe2, 0] - yip5
    x2 = ap2.xm[idxs2:idxe2, 2] - xip5
    y2 = ap2.xm[idxs2:idxe2, 0] - yip5
    x = _p.concatenate((x1, x2[::-1]))
    y = _p.concatenate((y1, y2[::-1]))
    _p.fill(y, x, facecolor='r', alpha=0.2)
Example #37
def plotenvelopex(ip,t1,ap1,sele,eele,t2,ap2,sele2,eele2):
  """
  plotenvelope("ip5",t1,ap1,"mqy_4l5_b1","mqy_4r5_b1",t2,ap2,"mqy_4l5_b2","mqy_4r5_b2")
  """
  #select
  yip5=(ap1.co[t1._row_ref[ip],0]+ap2.co[t2._row_ref[ip],0])/2
  xip5=(ap1.co[t1._row_ref[ip],2]+ap2.co[t2._row_ref[ip],2])/2
  idxs1=t1._row_ref[sele]
  idxe1=t1._row_ref[eele]
  idxs2=t2._row_ref[sele2]
  idxe2=t2._row_ref[eele2]
  # start plot
  _p.hold(True)
  _p.title("Horizontal beam envelope")
  _p.xlabel(r"$z [\rm{m}]$")
  _p.ylabel(r"$x [\rm{m}]$")
  _p.grid(True)
  # closed orbit
  x1=ap1.co[idxs1:idxe1,2]-xip5
  y1=ap1.co[idxs1:idxe1,0]-yip5
  _p.plot(y1,x1,color=[0,0,1])
  x2=ap2.co[idxs2:idxe2,2]-xip5
  y2=ap2.co[idxs2:idxe2,0]-yip5
  _p.plot(y2,x2,color=[1,0,0])
  # beam1
  x1=ap1.xp[idxs1:idxe1,2]-xip5
  y1=ap1.xp[idxs1:idxe1,0]-yip5
  x2=ap1.xm[idxs1:idxe1,2]-xip5
  y2=ap1.xm[idxs1:idxe1,0]-yip5
  x = _p.concatenate( (x1,x2[::-1]) )
  y = _p.concatenate( (y1,y2[::-1]) )
  _p.fill(y, x, facecolor='b',alpha=0.2)
  # beam2
  x1=ap2.xp[idxs2:idxe2,2]-xip5
  y1=ap2.xp[idxs2:idxe2,0]-yip5
  x2=ap2.xm[idxs2:idxe2,2]-xip5
  y2=ap2.xm[idxs2:idxe2,0]-yip5
  x = _p.concatenate( (x1,x2[::-1]) )
  y = _p.concatenate( (y1,y2[::-1]) )
  _p.fill(y, x, facecolor='r',alpha=0.2)
Example #38
 def save_Callback(self):
     """
     Save acquired data
     """
     filepath = os.path.join(self.lastSaveDir, self.lastSaveFile)
     filename = QtGui.QFileDialog.getSaveFileName(None, 'Select log file',
                                                  filepath)
     filename = str(filename)
     if filename:
         data = pylab.concatenate((self.t, self.data), axis=1)
         pylab.savetxt(filename, data)
         self.lastSaveDir = os.path.split(filename)[0]
         self.lastSaveFile = os.path.split(filename)[1]
Example #39
File: misc.py  Project: MMaus/mutils
 def mov_avg(data, tailLength):
     """
     returns the moving average for a 1D array data
     """
     data1 = concatenate((data[tailLength:0:-1], data, data[-tailLength:]))
     #print "mov avg idat shape:", data1.shape, "tailLength:", tailLength
     avgFilter = array([
         1. / (2. * tailLength + 1.),
     ] * (2 * tailLength + 1))
     #print "avgFilter:", avgFilter.shape
     res = convolve(data1, avgFilter)[2 * tailLength:-2 * tailLength]
     #print "mov avg shape:", res.shape
     return res
Example #40
def similAndPear(A, B=None, mmm='max'):
    '''A has to be 1-dimensional.'''
    sP = fPearsonCorrelation(A.T, B)
    sE = similarity_Euclidean(A, B)

    if A.ndim == 2:
        if mmm == 'max':
            return concatenate((sP[:, newaxis], sE[:, newaxis]), 1).max(1)
        elif mmm == 'mean':
            return concatenate((sP[:, newaxis], sE[:, newaxis]), 1).mean(1)
        elif mmm == 'min':
            return concatenate((sP[:, newaxis], sE[:, newaxis]), 1).min(1)
        elif mmm is None:
            return sP[:, newaxis], sE[:, newaxis]
    else:
        if mmm == 'max':
            return max(sP, sE)
        elif mmm == 'mean':
            return mean((sP, sE))
        elif mmm == 'min':
            return min(sP, sE)
        elif mmm is None:
            return sP, sE
Example #41
def snakeScan(centre=(0, 0), range=(10e-6, 10e-6), spacing=(1.0e-6, 1.0e-6)):

    from pylab import arange, concatenate, repeat

    if len(spacing) == 1:
        spacing = (spacing[0], spacing[0])

    # define some grids
    xgrid = arange(20, 31)
    ygrid = arange(10, 16)

    xscan = concatenate([xgrid[:: (-1) ** i] for i in range(len(ygrid))])
    yscan = repeat(ygrid, len(xgrid))
    return list(zip(xscan, yscan))
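
Illustrative call (my addition). Note that centre, range and spacing are currently unused by the body, which walks the hard-coded xgrid/ygrid in serpentine (boustrophedon) order.

pts = snakeScan()
print(pts[:3])   # first row scans left to right: (20, 10), (21, 10), (22, 10), ...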
Example #42
def other():
    data = pylab.concatenate((pylab.normal(1, .2,
                                           5000), pylab.normal(2, .2, 2500)))
    y, x, _ = pylab.hist(data, 100, alpha=.3, label='data')

    # x = x[1:]
    x = (x[1:] + x[:-1]) / 2  # for len(x)==len(y)  # TODO what?

    expected = (1, .2, 250, 2, .2, 125)
    params, cov = curve_fit(bimodal, x, y, expected)
    sigma = numpy.sqrt(numpy.diag(cov))
    plt.plot(x, bimodal(x, *params), color='red', lw=3, label='model')
    plt.legend()
    print(params, '\n', sigma)
Example #43
def plot_net_survival(db, country_list):
    import pylab as pl
    import settings
    pl.clf()
    ii = 0.
    for k, p in sorted(db.items()):
        country = k.split('_')[2]  # TODO: refactor k.split into function
        if country not in country_list:
            continue
        pr = pl.sort(p.__getattribute__('Pr[net is lost]').gettrace())
        pr0 = pr[int(.025 * len(pr))]
        pr1 = pr[int(.975 * len(pr))]

        t = pl.arange(0, 5, .1)
        pct0 = 100. * pl.where(t < 3, (1 - pr0)**t, 0.)
        pct1 = 100. * pl.where(t < 3, (1 - pr1)**t, 0.)

        pl.fill(pl.concatenate((t, t[::-1])),
                pl.concatenate((pct0, pct1[::-1])),
                alpha=.9,
                linewidth=3,
                facecolor='none',
                edgecolor=pl.cm.spectral(ii / len(country_list)),
                label=country)
        pl.fill(pl.concatenate((t, t[::-1])),
                pl.concatenate((pct0, pct1[::-1])),
                alpha=.5,
                linewidth=0,
                facecolor=pl.cm.spectral(ii / len(country_list)),
                zorder=-ii)
        ii += 1.
    pl.legend()
    pl.ylabel('Nets Remaining (%)')
    pl.xlabel('Time in household (years)')
    pl.title('LLIN Survival Curve Posteriors')
    pl.savefig(settings.PATH + 'net_survival.png')
Example #44
def axes_in_mm(x0,
               y0,
               w,
               h,
               fig=None,
               label=None,
               label_xoff=1.5,
               label_yoff=1.5,
               label_params={},
               **kwargs):
    """

    Parameters
    ----------
    x0
    y0
    w
    h
    fig
    label
    label_xoff
    label_yoff
    label_params : dict
        dictionary with font properties fontweight, size, va, ha,
    ensure_even : bool
        round the size of the Figure such that it has even number of pixels, necessary for most versions of ffmpeg
    kwargs

    Returns
    -------

    """
    if fig is None:
        fig = pylab.gcf()
    size_mm = fig.get_size_inches() * 25.4
    fig_scale = SCALE_FACTOR / pylab.concatenate([size_mm, size_mm])

    ax = fig.add_axes([x0, y0, w, h] * fig_scale, **kwargs)
    if label is not None:
        lp = dict(va='top', ha='left', fontweight='bold')
        lp.update(label_params)
        ax.text(float(label_xoff) / w,
                1 - float(label_yoff) / h,
                label,
                transform=ax.transAxes,
                **lp)

    return ax
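
A hedged usage sketch (my addition). SCALE_FACTOR is module state the function assumes; with SCALE_FACTOR = 1.0 the [x0, y0, w, h] millimetre box maps to figure fractions.

import pylab
fig = pylab.figure(figsize=(80 / 25.4, 60 / 25.4))   # an 80 x 60 mm figure
ax = axes_in_mm(10, 10, 60, 40, fig=fig, label='A')  # axes 10 mm in from the corner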
Example #45
 def __init__(self,
              inputWeights,
              internalWeights,
              readoutSize,
              feedbackWeights=None,
              *args,
              **kwargs):
     '''
     '''
     if feedbackWeights is None:
          raise ValueError("No feedback weights were passed as argument!")
     newInputWeights = pl.concatenate((inputWeights, feedbackWeights),
                                      axis=1)
     super(ReservoirNSLFeedback,
           self).__init__(newInputWeights, internalWeights, readoutSize,
                          *args, **kwargs)
Example #46
    def train(self, inputList, desiredOutputList, fakeFeedback=None, **kwargs):
        '''
        '''
        if fakeFeedback is not None:
            assert len(fakeFeedback) == len(inputList)
            assert fakeFeedback[0].shape == desiredOutputList[0].shape
            feedbackInput = fakeFeedback
        else:
            feedbackInput = desiredOutputList

        newInputList = [
            pl.concatenate((input_, feedback), axis=1)
            for input_, feedback in zip(inputList, feedbackInput)
        ]

        super(ReservoirNSLFeedback, self).train(newInputList,
                                                desiredOutputList)
Example #47
def simulationWithDruga(numViruses, maxPop, maxBirthProb, clearProb,
                        resistances, mutProb, numTrials):
    """
    Runs simulations and plots graphs for problem 5.

    For each of numTrials trials, instantiates a patient, runs a simulation for
    150 timesteps, adds guttagonol, and runs the simulation for an additional
    150 timesteps.  At the end plots the average virus population size
    (for both the total virus population and the guttagonol-resistant virus
    population) as a function of time.

    numViruses: number of ResistantVirus to create for patient (an integer)
    maxPop: maximum virus population for patient (an integer)
    maxBirthProb: Maximum reproduction probability (a float between 0-1)        
    clearProb: maximum clearance probability (a float between 0-1)
    resistances: a dictionary of drugs that each ResistantVirus is resistant to
                 (e.g., {'guttagonol': False})
    mutProb: mutation probability for each ResistantVirus particle
             (a float between 0-1). 
    numTrials: number of simulation runs to execute (an integer)
    
    """

    pop = pylab.zeros([300, 2])
    for _ in range(numTrials):
        viruses = [
            ResistantVirus(maxBirthProb, clearProb, resistances, mutProb)
            for _ in range(numViruses)
        ]
        patient = TreatedPatient(viruses, maxPop)
        result1 = [[patient.update(),
                    patient.getResistPop(['guttagonol'])] for _ in range(150)]
        patient.addPrescription('guttagonol')
        result2 = [[patient.update(),
                    patient.getResistPop(['guttagonol'])] for _ in range(150)]
        pop += pylab.concatenate((result1, result2))
    pop = pylab.transpose(pop) / numTrials
    pylab.plot(pop[0], label="avg_pop")
    pylab.plot(pop[1], label="resistant_pop")
    pylab.title("ResistantVirus Simulation")
    pylab.xlabel("Time Steps")
    pylab.ylabel("Average Virus Population")
    pylab.legend(loc="best")
    pylab.show()
Example #48
def cmap_discretize(cmap, N):
    """
    Return a discrete colormap from the continuous colormap cmap.
    cmap: colormap instance, eg. cm.jet. 
    N: number of colors.
    """

    if type(cmap) == str:
        cmap = get_cmap(cmap)
    colors_i = concatenate((linspace(0, 1., N), (0., 0., 0., 0.)))
    colors_rgba = cmap(colors_i)
    indices = linspace(0, 1., N + 1)
    cdict = {}
    for ki, key in enumerate(('red', 'green', 'blue')):
        cdict[key] = [(indices[i], colors_rgba[i - 1, ki],
                       colors_rgba[i, ki]) for i in range(N + 1)]
    # Return colormap object.
    return matplotlib.colors.LinearSegmentedColormap(cmap.name + "_%d" % N,
                                                     cdict, 1024)
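
A short usage sketch (my addition), assuming the get_cmap/concatenate/linspace imports implied above.

jet5 = cmap_discretize('jet', 5)
# e.g. pcolor(rand(10, 10), cmap=jet5) now draws with exactly 5 discrete colors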
Example #49
def mine_ok():
    data = pylab.concatenate(
        (pylab.normal(muP, sigmaP,
                      samplesP), pylab.normal(muC, sigmaC, samplesC)))
    y, x, _ = pylab.hist(data, 100, alpha=.3, label='data')

    # x = x[1:]
    x = (x[1:] + x[:-1]) / 2  # for len(x)==len(y)  # TODO what?

    expected = (muP, sigmaP, aP, muC, sigmaC, aC)

    params, cov = curve_fit(bimodal, x, y, expected)
    sigma = numpy.sqrt(numpy.diag(cov))
    plt.plot(x, bimodal(x, *params), color='red', lw=3, label='model')
    plt.legend()
    print('params: ', end='')
    print(params)
    print('sigma: ', end='')
    print(sigma)
Example #50
def norm_hist_bins(y, bins=10, normed='height'):
    """Just like the matplotlib mlab.hist, but can normalize by height.

    normed can be 'area' (produces matplotlib behavior, area is 1), 
    any False value (no normalization), or any True value (normalization).

    Original docs from matplotlib:

    Return the histogram of y with bins equally sized bins.  If bins
    is an array, use the bins.  Return value is
    (n,x) where n is the count for each bin in x

    If normed is False, return the counts in the first element of the
    return tuple.  If normed is True, return the probability density
    n/(len(y)*dbin)
    
    If y has rank>1, it will be raveled
    Credits: the Numeric 22 documentation
    """
    y = asarray(y)
    if len(y.shape)>1: y = ravel(y)

    if not iterable(bins):
        ymin, ymax = min(y), max(y)
        if ymin==ymax:
            ymin -= 0.5
            ymax += 0.5

        if bins==1: bins=ymax
        dy = (ymax-ymin)/bins
        bins = ymin + dy*arange(bins)
    n = searchsorted(sort(y), bins)
    n = diff(concatenate([n, [len(y)]]))
    if normed:
        if normed == 'area':
            db = bins[1]-bins[0]
        else:
            db = 1.0
        return 1/(len(y)*db)*n, bins
    else:
        return n, bins
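
Hedged usage sketch (my addition); asarray and friends inside the function are assumed from a numpy star import, as above.

from numpy.random import randn
n, bins = norm_hist_bins(randn(1000), bins=20, normed='height')
print(n.sum())   # ~1.0: 'height' divides counts by len(y) only (db = 1.0)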
Example #51
 def set_pdf(self, x, p, Nrl=1000):
     self.x = x
     self.pdf = p / p.sum()
     self.cdf = self.pdf.cumsum()
     self.inversecdfbins = Nrl
     self.Nrl = Nrl
     y = pylab.arange(Nrl) / float(Nrl)
     delta = 1.0 / Nrl
     self.inversecdf = pylab.zeros(Nrl)
     self.inversecdf[0] = self.x[0]
     cdf_idx = 0
     for n in range(1, self.inversecdfbins):
         while self.cdf[cdf_idx] < y[n] and cdf_idx < Nrl:
             cdf_idx += 1
         self.inversecdf[n] = self.x[cdf_idx - 1] + (
             self.x[cdf_idx] - self.x[cdf_idx - 1]) * (y[n] - self.cdf[
                 cdf_idx - 1]) / (self.cdf[cdf_idx] - self.cdf[cdf_idx - 1])
         if cdf_idx >= Nrl:
             break
     self.delta_inversecdf = pylab.concatenate(
         (pylab.diff(self.inversecdf), [0]))
Example #52
def MutationPerClone(x):
    '''Calculates average number and STD of mutations from MutationRecords.dat
    file per clone. Mut_record[0] - number of cells, [1] - number of clones, [2]
    - mean number of point mutations, [3] - STD of point mutations, [4] - mean
    number of duplications, [5] - STD  of duplications, [6] - mean number of
    deletion, [7] - STD of deletion.'''
    Mut_record = p.zeros(8)
    Mut_record[0] = x.shape[0]
    ZERRO = p.zeros((x.shape[0],1))
    x = p.concatenate((x, ZERRO), axis=1)
    for i in range(0, x.shape[0]):
        if (x[i, 4] == 0):
            for j in range(i+1, x.shape[0]):
                if (x[j, 0] == x[i, 0]):
                    x[j, 4] = p.nan
    x = x[~p.isnan(x).any(1)]
    Mut_record[1] = x.shape[0]
    Mut_record[2] = x[:,1].mean()
    Mut_record[3] = x[:,1].std()
    Mut_record[4] = x[:,2].mean()
    Mut_record[5] = x[:,2].std()
    Mut_record[6] = x[:,3].mean()
    Mut_record[7] = x[:,3].std()
    return Mut_record
Example #53
 def concatenate_inputweights(self):
     '''Re-concatenate input neurons weights and feedback weights'''
     self.inputWeights = pl.concatenate(
         (self.inputOnlyWeights, self.feedbackWeights), axis=1)
Example #54
def heatMap(data, pmData, wells, outDir, plateFlag):
    '''Make growth heatmap after curve fitting and analysis'''

    wellids = ['{}{}'.format(w[0], w[1]) for w in wells]
    clones = sorted(pmData.clones)
    # plateFlag == True: use growth condition names
    # plateFlag == False: use well ids
    if plateFlag:
        # Get growth condition
        tmp = ['{}-{}'.format(w, pmData.wells[w][1]) for w in wellids]
        wellLabels = [x if len(x) <= 20 else '{}...'.format(x[:18])
                      for x in tmp]
    else:
        wellLabels = wellids

    first = True  # Flag for creating plotData numpy array
    for clone in clones:
        # ...[params][4] is growth level
        tmpArr = [data[clone][w]['params'][4] for w in wellids]

        if first:
            plotData = py.array(tmpArr, ndmin=2)  # 2 dimensional array
            first = False
        else:
            plotData = py.concatenate((plotData, [tmpArr]))

    ######################################################
    # Plotting
    ######################################################
    numClones = len(clones)
    numWells = len(wellLabels)

    # Width is 15 inches
    # All measurements are in inches
    width = 15

    # Height determined by number of clones
    # Cap at 12 inches
    height = len(clones)
    if height > 12:
        height = 12

    # Fontsize of x axis determined by
    # number of wells on x axsis
    # Minimum at 13 was determined by trial and error
    if numWells > 13:
        xfontsize = numWells / 13
    else:
        xfontsize = 10
    yfontsize = 10

    # Create figure and axis
    fig, ax = plt.subplots()
    fig.set_size_inches(width, height)

    # Create heatmap object using pcolor
    # Find maximum growth level value
    maxGL = py.amax(plotData) + 0.1
    hm = ax.pcolor(plotData,
                   cmap=plt.cm.Greys,
                   edgecolor='black',
                   vmin=0,
                   vmax=maxGL)

    # Create color bar legend
    ###mini = py.amin(plotData)
    ###maxi = py.amax(plotData)
    ###cbticks = [mini, maxi, 0.25, 0.75]
    ###cblabs = ['min', 'max', 'no growth', 'growth']
    ###cbticks, cblabs = zip(*sorted(zip(cbticks, cblabs)))
    cbticks = [0.25, 0.75]
    cblabs = ['no growth', 'growth']
    cbar = fig.colorbar(hm, orientation='horizontal')
    cbar.set_ticks(cbticks)
    cbar.set_ticklabels(cblabs)

    # Move x axis to top
    ax.xaxis.tick_top()
    ax.yaxis.tick_left()

    # Align x and y tick marks to center of cells
    ax.set_xticks(py.arange(0, numWells) + 0.5)
    ax.set_yticks(py.arange(0, numClones) + 0.5)

    # Set tick labels
    ax.set_xticklabels(labels=wellLabels,
                       minor=False,
                       rotation=90,
                       fontsize=xfontsize)
    ax.set_yticklabels(labels=clones, minor=False, fontsize=yfontsize)
    ax.axis('tight')
    # Remove tick marks lines
    plt.tick_params(axis='both',
                    left='off',
                    right='off',
                    bottom='off',
                    top='off')

    plt.savefig('{}/growthlevels.png'.format(outDir),
                dpi=200,
                bbox_inches='tight')
Example #55
    return t, r


if __name__ == "__main__":
    zeros = pylab.arange(2, 40, 4)
    ts = pylab.array([])
    #Test window_spike_train, repeated several times
    #1 second silence
    #1 second 100Hz fixed
    #1 second silence
    #1 second 100Hz poisson
    for n in range(zeros.size):
        tsf = pylab.arange(0, 1, .01) + zeros[
            n] - 1  #100 points spaced at .01 with 500ms of silence at the start
        tsp = poisson_train(rate=100, duration=1) + zeros[n] + 1
        ts = pylab.concatenate((ts, tsf, tsp))

    for start_time, zero_time, end_time in zip([0, -2], [0, zeros], [None, 2]):
        pylab.figure()
        t, be, isihist = isi_histogram(ts,
                                       start_time=start_time,
                                       zero_times=zero_time,
                                       end_time=end_time,
                                       window_len=.1,
                                       range=.02,
                                       nbins=21)
        pylab.subplot(4, 1, 1)
        pylab.pcolor(t, be, isihist.T, vmin=0, vmax=1, cmap=pylab.cm.gray_r)
        pylab.ylabel('isi')
        t, cv, rate = spikecv(ts,
                              start_time=start_time,
Example #56
pl.xlim(xra)
pl.ylim(yra)
pl.xticks([])
pl.yticks([])
pl.contour(fils.skeleton,
           colors='c',
           linewidths=1,
           vmin=0,
           vmax=1,
           levels=[0.5])

for ff in fils.filaments:
    for b in ff.end_pts:
        pl.plot(b[1], b[0], 'ob', markersize=2)
    if len(ff.intersec_pts) > 0:
        for b in pl.concatenate(ff.intersec_pts):
            pl.plot(b[1], b[0], 'ob', markersize=2)

pl.savefig(label + ".plots/" + label + ".fils_branches.png")

#================================================================

mom1 = fits.getdata(mom1file)
ly, lx = mom1.shape
x, y = range(0, lx), range(0, ly)
xi, yi = pl.meshgrid(x, y)

wid = 9
from astropy.convolution import Gaussian2DKernel
from astropy.convolution import convolve
kernel = Gaussian2DKernel(stddev=wid / 2.354)
Example #57
def polygon(x, y1, y2, *args, **kwargs):
    x = pl.concatenate((x, x[::-1]))
    y = pl.concatenate((y1, y2[::-1]))
    pl.fill(x, y, *args, **kwargs)
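
A minimal usage sketch (my addition): shade the band between two curves, assuming pl is pylab as in the snippet.

x = pl.linspace(0, 2 * pl.pi, 100)
polygon(x, pl.sin(x) - 0.2, pl.sin(x) + 0.2, facecolor='#d3d3d3', edgecolor='none')
pl.plot(x, pl.sin(x), 'k')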
Example #58
def fit_screen_to_tec(station_names, source_names, pp, airmass, rr, times,
                      height, order, r_0, beta):
    """
    Fits a screen to given TEC values using Karhunen-Lo`eve base vectors

    Keyword arguments:
    station_names -- array of station names
    source_names -- array of source names
    pp -- array of piercepoint locations
    airmass -- array of airmass values
    rr -- array of TEC solutions
    times -- array of times
    height -- height of screen (m)
    order -- order of screen (i.e., number of KL base vectors to keep)
    r_0 -- scale size of phase fluctuations (m)
    beta -- power-law index for phase structure function (5/3 =>
        pure Kolmogorov turbulence)
    """
    import numpy as np
    from pylab import kron, concatenate, pinv, norm, newaxis, find, amin, svd, eye
    try:
        import progressbar
    except ImportError:
        import losoto.progressbar as progressbar

    logging.info('Fitting screens to TEC values...')
    N_stations = len(station_names)
    N_sources = len(source_names)
    N_times = len(times)

    tec_fit_all = np.zeros((N_times, N_sources, N_stations))
    residual_all = np.zeros((N_times, N_sources, N_stations))

    A = concatenate([
        kron(eye(N_sources), np.ones((N_stations, 1))),
        kron(np.ones((N_sources, 1)), eye(N_stations))
    ],
                    axis=1)

    N_piercepoints = N_sources * N_stations
    P = eye(N_piercepoints) - np.dot(np.dot(A, pinv(np.dot(A.T, A))), A.T)

    pbar = progressbar.ProgressBar(maxval=N_times).start()
    ipbar = 0
    for k in range(N_times):
        try:
            D = np.resize(pp[k, :, :], (N_piercepoints, N_piercepoints, 3))
            D = np.transpose(D, (1, 0, 2)) - D
            D2 = np.sum(D**2, axis=2)
            C = -(D2 / r_0**2)**(beta / 2.0) / 2.0
            P1 = eye(N_piercepoints) - np.ones(
                (N_piercepoints, N_piercepoints)) / N_piercepoints
            C1 = np.dot(np.dot(P1, C), P1)
            U, S, V = svd(C1)

            B = np.dot(P, np.dot(np.diag(airmass[k, :]), U[:, :order]))
            pinvB = pinv(B, rcond=1e-3)

            rr1 = np.dot(P, rr[:, k])
            tec_fit = np.dot(U[:, :order], np.dot(pinvB, rr1))
            tec_fit_all[k, :, :] = tec_fit.reshape((N_sources, N_stations))

            residual = rr1 - np.dot(P, tec_fit)
            residual_all[k, :, :] = residual.reshape((N_sources, N_stations))
        except Exception:
            # Set screen to zero if fit did not work
            logging.debug('Tecscreen fit failed for timeslot {0}'.format(k))
            tec_fit_all[k, :, :] = np.zeros((N_sources, N_stations))
            residual_all[k, :, :] = np.ones((N_sources, N_stations))

        pbar.update(ipbar)
        ipbar += 1
    pbar.finish()

    return tec_fit_all, residual_all
Example #59
def sym_correlation_profile(s1, Nstat=3, edge_bins=None, disp=False):
    ''' 
    INPUT:
    - s1: the adjacency matrix of an undirected network  
    - srand: nullUndirectedNetwork(s1)
    - Nstat: (optional) the number of randomized networks in the ensemble. Default: 3
    - edge_bins: (optional) the array used to bin degrees. Default: [1,3,10,30,100...]
    
    OUTPUT:
    - n_12: number of edges connecting different bins to each other
    - nr_12: same, averaged over Nstat realizations of a randomized network
    - nsr_12: square of nr_12 averaged over Nstat realizations of a randomized network
    - R_12: correlation profile ratio: R_12 = n_12/nr_12
    - Z_12: correlation profile Z-score: Z_12 = (n_12-nr_12)/sqrt(nsr_12-nr_12^2)'''

    srand = 1 * (0 < abs(s1 - diag(diag(s1))))
    srand = 1 * (0 < (srand + srand.T))

    k2 = sum(srand)
    k2_max = k2.max()

    if edge_bins is None:
        edge_bins = [1, 3]
        m1 = 1
        while edge_bins[m1] <= k2_max:
            edge_bins.append(10 * edge_bins[m1 - 1])
            m1 += 1

    bedg1 = list(edge_bins)

    if k2_max > bedg1[-1]:
        bedg1.append(k2_max)

    n_1_2_sym_orig = binned_srand_internal(srand, bedg1, bedg1)

    print('randomized network #', 1)
    srand = nullUndirectedNetwork(srand)
    n_1_2 = binned_srand_internal(srand, bedg1, bedg1)

    aver_n_1_2_sym = n_1_2
    aver_sq_n_1_2_sym = n_1_2**2

    for k in range(1, Nstat):
        print('randomized network #', k + 1)
        srand = nullUndirectedNetwork(srand)
        n_1_2 = binned_srand_internal(srand, bedg1, bedg1)
        aver_n_1_2_sym = aver_n_1_2_sym + n_1_2
        aver_sq_n_1_2_sym = aver_sq_n_1_2_sym + n_1_2**2

    aver_n_1_2_sym = aver_n_1_2_sym / Nstat
    aver_sq_n_1_2_sym = aver_sq_n_1_2_sym / Nstat
    err_n_1_2_sym = sqrt(aver_sq_n_1_2_sym - aver_n_1_2_sym**2)

    sym_ratio_1_2_sym = n_1_2_sym_orig / (aver_n_1_2_sym + 0.0001 *
                                          (aver_n_1_2_sym == 0))
    dev_n_1_2_sym_orig = (n_1_2_sym_orig -
                          aver_n_1_2_sym) / (err_n_1_2_sym + 0.0001 *
                                             (aver_n_1_2_sym == 0))

    sym_ratio_1_2_sym = sym_ratio_1_2_sym[:-1, :-1]
    dev_n_1_2_sym_orig = dev_n_1_2_sym_orig[:-1, :-1]

    R_12 = sym_ratio_1_2_sym
    Z_12 = dev_n_1_2_sym_orig
    n_12 = n_1_2_sym_orig[:-1, :-1]
    nr_12 = aver_n_1_2_sym[:-1, :-1]
    nsr_12 = aver_sq_n_1_2_sym[:-1, :-1]

    sym_ratio_1_2_sym = concatenate(
        (sym_ratio_1_2_sym, sym_ratio_1_2_sym[-1, :][newaxis]), axis=0)
    sym_ratio_1_2_sym = concatenate(
        (sym_ratio_1_2_sym, sym_ratio_1_2_sym[:, -1][:, newaxis]), axis=1)
    dev_n_1_2_sym_orig = concatenate(
        (dev_n_1_2_sym_orig, dev_n_1_2_sym_orig[-1, :][newaxis]), axis=0)
    dev_n_1_2_sym_orig = concatenate(
        (dev_n_1_2_sym_orig, dev_n_1_2_sym_orig[:, -1][:, newaxis]), axis=1)

    if disp:
        from pylab import figure, meshgrid, pcolor, title, xlabel, ylabel, xscale, yscale, colorbar
        x, y = meshgrid(bedg1, bedg1)

        figure()
        title('R(K_1,K_2) for network w/ %s nodes, %s links' %
              (srand.shape[1], srand.sum() / 2))
        pcolor(x, y, sym_ratio_1_2_sym)
        colorbar()
        xlabel('K_1')
        xscale('log')
        ylabel('K_2')
        yscale('log')

        figure()
        title('Z(K_1,K_2) for network w/ %s nodes, %s links' %
              (srand.shape[1], srand.sum() / 2))
        pcolor(x, y, dev_n_1_2_sym_orig)
        colorbar()
        xlabel('K_1')
        xscale('log')
        ylabel('K_2')
        yscale('log')

    return R_12, Z_12, n_12, nr_12, nsr_12
Example #60
def rhythm(signal_params=None, rhythm_params=None, patterns=None):
    """
    ::

        Generate a multi-timbral rhythm sequence using noise-band timbres 
        with center-frequency, bandwidth, and decay time controls

        Timbre signal synthesis parameters are specified in 
        the signal_params dict:
            ['cf'] - list of center-frequencies for each timbre
            ['bw'] - list of band-widths for each timbre
            ['dur'] - list of timbre durations relative to a quarter note
            ['amp'] - list of timbre relative amplitudes [default 1.0]
            ['sr'] - sample rate of generated audio
            ['tc'] - constant of decay envelope relative to subdivisions:
             The following expression yields a time-constant for decay to -60dB 
             in a given number of beats at the given tempo:
               t = beats * tempo / 60.
               e^( -tc * t ) = 10^( -60dB / 20 )
               tc = -log( 0.001 ) / t           

        The rhythm sequence is generated with musical parameters specified in
        the rhythm_params dict: 
            ['tempo']  - how fast
            ['subdiv'] - how many pulses to divide a 4/4 bar into

        Rhythm sequences are specified in the patterns tuple (p1,p2,...,pn)
           patterns - n-tuple of integers with subdiv-bits onset patterns, 
            one integer element for each timbre

           Parameter constraints:
             Fail if not:
               len(bw) == len(cf) == len(dur) == len(patterns)
    """
    # Short names
    p = default_rhythm_params()
    signal_params = p[0] if signal_params is None else signal_params
    rhythm_params = p[1] if rhythm_params is None else rhythm_params
    patterns = p[2] if patterns is None else patterns
    num_timbres = _check_rhythm_params(signal_params, rhythm_params, patterns)

    # convenience variables
    sp = signal_params
    rp = rhythm_params

    # Duration parameters
    qtr_dur = 60.0 / rp['tempo'] * sp['sr']  # duration of 1/4 note
    eth_dur = 60.0 / (2.0 * rp['tempo']) * sp['sr']  # duration of 1/8 note
    sxt_dur = 60.0 / (4.0 * rp['tempo']) * sp['sr']  # duration of 1/16 note
    meter = 4.0
    bar_dur = meter * qtr_dur  # duration of 1 bar

    # Audio signal wavetables from parameters
    ns_sig = []
    ns_env = []
    for cf, bw, dur, amp in zip(sp['cf'], sp['bw'], sp['dur'], sp['amp']):
        ns_par = default_noise_params()
        ns_par['sr'] = sp['sr']
        ns_par['cf'] = cf
        ns_par['bw'] = bw
        ns_par['num_points'] = 2 * bar_dur
        ns_sig.append(amp * noise(ns_par))
        ns_env.append(
            pow(10, -sp['tc'] * pylab.r_[0:2 * bar_dur] / (qtr_dur * dur)))

    # Music wavetable sequencer
    snd = [[] for _ in range(num_timbres)]
    snd_ptr = [qtr_dur for _ in range(num_timbres)]
    num_beats = rp['subdiv']
    test_bit = 1 << (num_beats - 1)
    dt = 16.0 / num_beats
    for beat in range(num_beats):
        for p, pat in enumerate(patterns):
            if (pat & (test_bit >> beat)): snd_ptr[p] = 0

        for t in range(num_timbres):
            idx = pylab.array(pylab.r_[snd_ptr[t]:snd_ptr[t] + sxt_dur * dt],
                              dtype='int')
            snd[t].append(ns_sig[t][idx] * ns_env[t][idx])
            snd_ptr[t] += sxt_dur * dt

    all_sig = pylab.concatenate(snd[0])
    for t in pylab.arange(1, num_timbres):
        sig = pylab.concatenate(snd[t])
        all_sig += sig
    return balance_signal(all_sig, sp['normalize'])
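
A hedged usage sketch (my addition): with no arguments the function falls back to default_rhythm_params(), so the default multi-timbral pattern can be rendered directly.

sig = rhythm()     # uses the default signal/rhythm/pattern parameters
print(len(sig))    # number of audio samples at signal_params['sr']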