Example #1
def add_brackets(xloc, yloc, xshift=0, color="r", label=None, options=None):
    """Add two brackets on the memory line plot.

    This function uses the current figure.

    Parameters
    ==========
    xloc: tuple with 2 values
        brackets location (on horizontal axis).
    yloc: tuple with 2 values
        brackets location (on vertical axis)
    xshift: float
        value to subtract from xloc.
    """
    try:
        import pylab as pl
    except ImportError:
        print("matplotlib is needed for plotting.")
        sys.exit(1)
    height_ratio = 20.
    vsize = (pl.ylim()[1] - pl.ylim()[0]) / height_ratio
    hsize = (pl.xlim()[1] - pl.xlim()[0]) / (3.*height_ratio)

    bracket_x = pl.asarray([hsize, 0, 0, hsize])
    bracket_y = pl.asarray([vsize, vsize, -vsize, -vsize])

    # Matplotlib workaround: labels starting with _ aren't displayed
    if label is not None and label.startswith('_'):
        label = ' ' + label
    if options.xlim is None or options.xlim[0] <= (xloc[0] - xshift) <= options.xlim[1]:
        pl.plot(bracket_x + xloc[0] - xshift, bracket_y + yloc[0],
                "-" + color, linewidth=2, label=label)
    if options.xlim is None or options.xlim[0] <= (xloc[1] - xshift) <= options.xlim[1]:
        pl.plot(-bracket_x + xloc[1] - xshift, bracket_y + yloc[1],
                "-" + color, linewidth=2 )
Example #2
    def calculateFDunc(self):
        #Calculates the uncertainty of the FFT according to:
        #   - J. M. Fornies-Marquina, J. Letosa, M. Garcia-Garcia, J. M. Artacho, "Error Propagation for the transformation of time domain into frequency domain", IEEE Trans. Magn, Vol. 33, No. 2, March 1997, pp. 1456-1459
        #Assumes that the amplitude of each time sample is statistically
        #independent of the amplitudes of the other time samples

        # Calculates uncertainty of the real and imaginary parts of the FFT and their covariance
        unc_E_real = []
        unc_E_imag = []
        cov = []
        for f in self.getfreqs():
            unc_E_real.append(py.sum((py.cos(2*py.pi*f*self._tdData.getTimes())*self._tdData.getUncEX())**2))
            unc_E_imag.append(py.sum((py.sin(2*py.pi*f*self._tdData.getTimes())*self._tdData.getUncEX())**2))
            cov.append(-0.5*py.sum(py.sin(4*py.pi*f*self._tdData.getTimes())*self._tdData.getUncEX()**2))
        
        unc_E_real = py.sqrt(py.asarray(unc_E_real))
        unc_E_imag = py.sqrt(py.asarray(unc_E_imag))
        cov = py.asarray(cov)
        
        # Calculates the uncertainty of the modulus and phase of the FFT
        unc_E_abs = py.sqrt((self.getFReal()**2*unc_E_real**2+self.getFImag()**2*unc_E_imag**2+2*self.getFReal()*self.getFImag()*cov)/self.getFAbs()**2)
        unc_E_ph = py.sqrt((self.getFImag()**2*unc_E_real**2+self.getFReal()**2*unc_E_imag**2-2*self.getFReal()*self.getFImag()*cov)/self.getFAbs()**4)
        
        t=py.column_stack((self.getfreqs(),unc_E_real,unc_E_imag,unc_E_abs,unc_E_ph))
        return self.getcroppedData(t)  
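For reference, the same error propagation written standalone on a synthetic pulse (a minimal sketch with plain numpy; the pulse shape and per-sample uncertainty are illustrative, not from the source):

import numpy as np
t = np.linspace(0, 50e-12, 512)                 # time axis, 50 ps
E = np.exp(-((t - 10e-12) / 1e-12) ** 2)        # synthetic time-domain pulse
sig = 1e-3 * np.ones_like(E)                    # per-sample amplitude uncertainty
freqs = np.fft.rfftfreq(t.size, t[1] - t[0])
spec = np.fft.rfft(E)                           # the spectrum these uncertainties describe
# the same sums as in the loop above
var_re = np.array([np.sum((np.cos(2 * np.pi * f * t) * sig) ** 2) for f in freqs])
var_im = np.array([np.sum((np.sin(2 * np.pi * f * t) * sig) ** 2) for f in freqs])
cov = np.array([-0.5 * np.sum(np.sin(4 * np.pi * f * t) * sig ** 2) for f in freqs])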
Example #3
File: rocabilly.py Project: bcb225/GWAB
 def correlation_curve_file(self, fname, colx=1, coly=2):
     """
     Takes a file of hits in tab-delimited format, ordered from least to most likely,
     and plots a log-log scatter of one column of data against another.
     @param fname: Name of file in standard output format for gwaboost.py
     @param colx: First column to scatter from.
     @param coly: Second column to scatter from.
     @return: The data in colx and coly, as numpy arrays.
     """
     import pylab
     file = open(fname)
     params = file.readline()
     col_ids = file.readline().split('\t')
     candidates = [(float(line.split('\t')[colx]),
                    float(line.split('\t')[coly])) for line in file]
     candidates.sort(reverse=True)
     #def ceil(x):
     #    if x < 1e-15:
     #        return 1e-15
     #    else:
     #        return x
     col0 = pylab.asarray([
         cand[0] for cand in candidates
     ])  #pylab.log([ceil(cand[0]) for cand in candidates])
     col1 = pylab.asarray([
         cand[1] for cand in candidates
     ])  #pylab.log([ceil(cand[1]) for cand in candidates])
     pylab.plot(col0, col1, 'go')
     pylab.xlabel(col_ids[colx])
     pylab.ylabel(col_ids[coly])
     return col0, col1
Example #4
def projectCl(lvec,P,D,dNdz,z,growthFac=None):
    """
    project C_l's given a power spectrum class P (Camb or
    BBKS) and Distance class D together

    arguments:
    lvec: vector of l values
    P: p.pk p.k contains the power spectrum, e.g. pt.Camb instance
    D: frw.Distance instance
    dNdz,z, growthFac: vectors suitable for trapezoid z-integration

    presently it crashes if z=0.0 is included, start from a small z value
    """
    lvec = M.asarray(lvec)
    dNdz2 = M.asarray(dNdz)**2
    z = M.asarray(z)
    da1 = 1./D.rtc(z)/D.h #comoving Da in h^1Mpc

    dNdz2vc = dNdz2/D.vc(z)/D.h**3 # comoving volume in (h^-1 Mpc)^3
    # use growth factor if given
    if growthFac is not None:
        dNdz2vc = dNdz2vc*(growthFac**2)
    lk = M.log(P.k)
    pk = P.pk
    
##     return M.asarray([utils.trapz(utils.splineResample(pk,lk,
##                      M.log(l*da1))*dNdz2vc,z) for l in lvec])
    return M.asarray([utils.trapz(utils.interpolateLin(pk,lk,
                     M.log(l*da1))*dNdz2vc,z) for l in lvec])
Example #5
def plot_return_per_episode(file_names,
                            agents_dir,
                            logscale=True,
                            nlabel=True,
                            alpha_label=True):
    all_returns = []
    labels = []
    total_episodes = []
    for onefile in file_names:
        agents = pickle.load(open(agents_dir + onefile, 'rb'))
        number_of_episodes = agents[0].episode_number
        total_episodes.append([i + 1 for i in range(number_of_episodes)])
        temp_returns = zeros(number_of_episodes)
        for agent in agents:
            if logscale:
                temp_returns += log(-asarray(agent.return_per_episode))
            else:
                temp_returns += -asarray(agent.return_per_episode)
        all_returns.append(temp_returns / size(agents))

        sigma = 0
        beta = 0
        alpha = 0
        n = 0

        for k in range(len(onefile)):
            if onefile[k] == 'S':
                sigma = round(
                    float(read_letter_until_character(onefile, k + 1, '_')), 2)
            elif onefile[k] == 'A':
                alpha = int(read_letter_until_character(onefile, k + 1, '_'))
            elif onefile[k] == 'N':
                n = int(read_letter_until_character(onefile, k + 1, '_'))
            elif onefile[k] == 'B':
                beta = round(
                    float(read_letter_until_character(onefile, k + 1, '.p')),
                    2)

        temp_label = r'$\sigma$ = ' + str(sigma)
        if beta != 1: temp_label += r', $\beta$ = ' + str(beta)
        if alpha_label: temp_label += r', $\alpha$ = ' + str(alpha)
        if nlabel: temp_label += ', n = ' + str(n)

        labels.append(temp_label)

    colors = ['b', 'g', 'c', 'k', 'y', 'm', 'b']
    for i in range(size(file_names)):
        plt.plot(total_episodes[i],
                 all_returns[i],
                 color=colors[i],
                 label=labels[i])

    plt.legend(loc=1)
    plt.xlabel('Episode Number', fontsize=14)
    plt.ylabel('Average Return per Episode', fontsize=14)
Example #6
def show_first_and_last_frames(vid,f1=1,f2=2):
	vidlen = vidtools.vid_duration(vid)
	out = os.path.splitext(vid)[0]
	os.system('vid2png.py %s %s 0 1 1' % (vid,out))
	os.system('vid2png.py %s %s %s 1 1' % (vid,out,vidlen-1))
	m1 = pylab.asarray(Image.open(sorted(glob(out+'/*.png'))[0]).convert('L'))
	m2 = pylab.asarray(Image.open(sorted(glob(out+'/*.png'))[-1]).convert('L'))
	pylab.matshow(m1,fignum=f1)
	pylab.matshow(m2,fignum=f2)
	return(m1.mean()-m2.mean())
Example #7
File: p2.py Project: boada/ICD
def plot_color_vs_mass_vs_icd():
    galaxies = mk_galaxy_struc()
    # Add the figures

    # Mass vs color plot I-H
    f1 = pyl.figure(1, figsize=(6, 4))
    f1s1 = f1.add_subplot(212)

    color1 = []
    mass1 = []
    icd1 = []

    for i in range(len(galaxies)):
        if galaxies[i].ston_I > 30.0:
            if -0.05 < galaxies[i].ICD_IH and galaxies[i].ICD_IH < 0.25:
                mass1.append(galaxies[i].Mass)
                color1.append(galaxies[i].Imag - galaxies[i].Hmag)
                icd1.append(galaxies[i].ICD_IH)
            else:
                mass1.append(galaxies[i].Mass)
                color1.append(galaxies[i].Imag - galaxies[i].Hmag)
                icd1.append(0.25)

    # Sort the arrays by ICD
    mass1 = pyl.asarray(mass1)
    color1 = pyl.asarray(color1)
    icd1 = pyl.asarray(icd1)

    IH_array = pyl.column_stack((mass1, color1, icd1))

    IH_array = colsort(IH_array, 3)

    sc1 = f1s1.scatter(IH_array[:, 0],
                       IH_array[:, 1],
                       c=IH_array[:, 2],
                       s=50,
                       cmap='nipy_spectral')  # 'spectral' was removed in matplotlib 2.2

    ############
    # FIGURE 1 #
    ############

    bar = pyl.colorbar(sc1)
    bar.set_label(r"$\xi[I,H]$")

    f1s1.set_xscale('log')
    f1s1.set_xlim(3e7, 1e12)
    f1s1.set_ylim(0.0, 4.0)
    f1s1.set_xlabel(r"Mass $[M_{\odot}]$")
    f1s1.set_ylabel("$(I-H)_{Observed}$")

    #    pyl.subplots_adjust(left=0.15, bottom=0.15, right=.75)
    #    pyl.savefig('color_vs_mass_vs_icd_IH.eps',bbox='tight')
    return f1s1
Example #8
File: p2.py Project: boada/ICD
def plot_color_vs_mass_vs_icd():
    galaxies=mk_galaxy_struc()
    # Add the figures

    # Mass vs color plot I-H
    f1 = pyl.figure(1,figsize=(6,4))
    f1s1 = f1.add_subplot(212)


    color1 = []
    mass1 = []
    icd1 = []

    for i in range(len(galaxies)):
        if galaxies[i].ston_I >30.0:
            if -0.05 < galaxies[i].ICD_IH and galaxies[i].ICD_IH < 0.25:
                mass1.append(galaxies[i].Mass)
                color1.append(galaxies[i].Imag-galaxies[i].Hmag)
                icd1.append(galaxies[i].ICD_IH)
            else:
                mass1.append(galaxies[i].Mass)
                color1.append(galaxies[i].Imag-galaxies[i].Hmag)
                icd1.append(0.25)

    # Sort the arrays by ICD
    mass1 = pyl.asarray(mass1)
    color1 = pyl.asarray(color1)
    icd1 = pyl.asarray(icd1)

    IH_array = pyl.column_stack((mass1,color1,icd1))

    IH_array = colsort(IH_array,3)

    sc1 = f1s1.scatter(IH_array[:,0], IH_array[:,1], c=IH_array[:,2], s=50,
    cmap='nipy_spectral')  # 'spectral' was removed in matplotlib 2.2


    ############
    # FIGURE 1 #
    ############

    bar = pyl.colorbar(sc1)
    bar.set_label(r"$\xi[I,H]$")

    f1s1.set_xscale('log')
    f1s1.set_xlim(3e7,1e12)
    f1s1.set_ylim(0.0,4.0)
    f1s1.set_xlabel(r"Mass $[M_{\odot}]$")
    f1s1.set_ylabel("$(I-H)_{Observed}$")

#    pyl.subplots_adjust(left=0.15, bottom=0.15, right=.75)
#    pyl.savefig('color_vs_mass_vs_icd_IH.eps',bbox='tight')
    return f1s1
Example #9
File: tcloop.py Project: espenhgn/pyDOG
    def __init__( self, t, x, y, A=[1., 0.85], a=[0.25, 0.85] ):
        '''
        
        Initializing generalized thalamo-cortical loop. Full
        functionality is only obtained in the subclasses, like DOG,
        full_eDOG, etc.
        
        Parameters
        ----------
        
        t : array
            1D Time vector
        x : np.array
            1D vector for x-axis sampling points
        y : np.array
            1D vector for y-axis sampling points

        Keyword arguments
        -----------------
        
        A : sequence (default A = [1., 0.85])
            Amplitudes for DOG receptive field for center and surround, 
            respectively
        a : sequence (default a = [0.25, 0.85])
            Width parameter for DOG receptive field for center and surround,
            respectively

        Usage
        -----
            Look at subclasses for example usage            
        '''
        # Set parameters as attributes
        self.name = 'pyDOG Toolbox'
        self.t = t
        self.A = A
        self.a = a
        self.x = x
        self.y = y
        # Find sampling rates and sampling frequencies
        self.nu_xs = 1./(x[1]-x[0])
        self.nu_ys = 1./(y[1]-y[0])
        self.fs = 1./(t[1]-t[0])
        self.f = fft.fftfreq(pl.asarray(t).size, t[1]-t[0])
        # spatial frequencies (unshifted, as returned by fftfreq)
        self.nu_x = fft.fftfreq(pl.asarray(x).size, x[1]-x[0])
        self.nu_y = fft.fftfreq(pl.asarray(y).size, y[1]-y[0])
        # Make meshgrids, may come in handy
        self._xx, self._yy = pl.meshgrid(self.x, self.y)
        self._nu_xx, self._nu_yy = pl.meshgrid(self.nu_x, self.nu_y)
        # r is needed for all circular rfs
        self.r = pl.sqrt(self._xx**2 + self._yy**2)
        self.k = 2 * pl.pi * pl.sqrt(self._nu_xx**2 + self._nu_yy**2)
Example #10
def estimate_fwhm_ms(int_matrix, numberOfScansForFWHMCalc):
    """
    This function is going to randomly select a variety of spectra and estimate a FWHM using a
    gaussian fit on some of the most intense peaks in the selected spectra.
    
    :param int_matrix: Matrix of all data points
    :param numberOfScansForFWHMCalc: how many scans will we use for estimate
    :return: estimate of full-width-half-max in number of points
    """

    # keep the values to be averaged over
    fwhm_arr = pl.array([])

    print "Estimating FWHM of profile MS"
    print "..."
    for i in range(numberOfScansForFWHMCalc):
        # randomly select a scan to investigate.
        spec_index = random.randint(
            0,
            len(pl.squeeze(pl.asarray(int_matrix[0, :].todense()))) - 1)
        cur_spec = pl.squeeze(pl.asarray(int_matrix[:, spec_index].todense()))
        # could be 0-intensity points in good spectra because of the way the int_matrix is built
        # lets work in units of scan number
        x = pl.arange(0, len(cur_spec))
        # Only going to be looking at the highest peak in each scan. Get the location of that peak.
        max_y = max(cur_spec)
        scan_max_y = pl.where(cur_spec == max_y)[0][0]
        popt, pcov = curve_fit(curves.gaussian,
                               x,
                               cur_spec,
                               bounds=([
                                   max_y * 0.8, scan_max_y - 1, 0.25 / 2.35482
                               ], [max_y * 1.2, scan_max_y + 1, 5 / 2.35482]))
        tmp_x = x
        tmp_y = curves.gaussian(x, popt[0], popt[1], popt[2])

        #pl.plot(tmp_x[scan_max_y-20:scan_max_y+20],tmp_y[scan_max_y-20:scan_max_y+20],c='r')
        #pl.plot(x[scan_max_y-20:scan_max_y+20],cur_spec[scan_max_y-20:scan_max_y+20],c='b',marker='o')
        #pl.show()

        # with the right fit parameter you can determine the full width at half max.
        # 2.35... is a known value to take this parameter and get the FWHM with.
        fwhm = 2.35482 * popt[2]
        fwhm_arr = pl.append(fwhm_arr, fwhm)

    avg_fwhm = fwhm_arr.mean()
    print "Done estimating FWHM of profile MS"
    print("avg_fwhm: " + str(avg_fwhm))

    # round up
    return int(avg_fwhm + 0.5)
Example #11
def learn(rate,desire,inp,weight):
    """
    desire ... one number;
    input ... one input vector
    wini ... initial weights
    """
    inp = asarray(inp)
    weight = asarray(weight)
    
    if neuron(weight,inp) == desire:
        dw = zeros(len(inp))
    else:
        dw = rate * desire * inp
    return dw
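A toy run of the update rule (a minimal sketch: neuron() is external to this snippet, so a sign-threshold stand-in is stubbed here, and numpy supplies the array helpers):

import numpy as np

def neuron(weight, inp):  # hypothetical stand-in activation
    return 1 if np.dot(weight, inp) >= 0 else -1

w = np.zeros(2)
for x, d in [((1.0, 1.0), 1), ((-1.0, -1.0), -1)]:
    w = w + learn(0.1, d, x, w)  # accumulate the weight update
print(w)  # ~[0.1 0.1]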
Example #12
 def _bringToCommonTimeAxis(self,tdDatas):
     #What can happen:
     #a) data lengths are not equal due to missing datapoints
     #   => no equal frequency bins
     #b) time positions might not be equal
         
     miss_points_max=10    
     #check for missing datapoints, allowing 
     #not more than miss_points_max points to miss 
     all_lengths=[]
     for thisdata in tdDatas:
         all_lengths.append(len(thisdata[:,0]))
     
     #do it always, just print a warning if miss_points_max is exceeded
     if min(all_lengths)!=max(all_lengths):
         print("Data lengths of succeeding measurements are not consistent, trying to fix")
         if max(all_lengths)-min(all_lengths)>miss_points_max:
             print("Warning: Data seems to be corrupted. \n" +\
             "The length of acquired data of repeated measurements differs by \n" + \
                 str(max(all_lengths)-min(all_lengths)) + ' datapoints')
     
     #interpolation does no harm, even if everything is consistent (no interpolation in this case)
     commonMIN=max([thistdData[:,0].min() for thistdData in tdDatas])
     commonMAX=min([thistdData[:,0].max() for thistdData in tdDatas])
     commonLENGTH=min([thistdData[:,0].shape[0] for thistdData in tdDatas])
     
     #interpolate the data
     for i in range(self.numberOfDataSets):
         tdDatas[i]=self.getInterData(tdDatas[i],commonLENGTH,commonMIN,commonMAX)
     
     return py.asarray(tdDatas)
Example #13
def mk_grid(llx, ulx, nx, lly, uly, ny):
    # Get the Galaxy info
    #galaxies = mk_galaxy_struc()
    galaxies = pickle.load(open('galaxies.pickle','rb'))
    galaxies = [galaxy for galaxy in galaxies if galaxy.ston_I > 30.]
    galaxies = pyl.asarray([galaxy for galaxy in galaxies if galaxy.ICD_IH < 0.5])

    # Make the low mass grid first
    x = pyl.asarray([galaxy.Mass for galaxy in galaxies])
    y = pyl.asarray([galaxy.ICD_IH * 100 for galaxy in galaxies])
    bins_x = pyl.linspace(llx, ulx, nx)
    bins_y = pyl.linspace(uly, lly, ny)

    grid = []

    for i in range(bins_x.size-1):
        xmin = bins_x[i]
        xmax = bins_x[i+1]
        for j in  range(bins_y.size-1):
            ymax = bins_y[j]
            ymin = bins_y[j+1]

            cond=[cond1 and cond2 and cond3 and cond4 for cond1, cond2, cond3,
                cond4 in zip(x>=xmin, x<xmax, y>=ymin, y<ymax)]

            grid.append(galaxies.compress(cond))

    return grid
Example #14
def cosmic_rejection(x, y, n):
    '''Try to reject cosmic from a spectrum
    '''
    bla = True
    blabla = False

    if n == 0:
        print " Warning: sigma for rejection = 0. Take 0.1."
        n = 0.1

    msk = abs(y - y.mean()) > n * y.std(ddof=1)

    xrej = x[msk]
    yrej = y[msk]

    if bla:
        print " Rejection of points outside", n, "sigma around the mean."
        print " Number of rejected points:", xrej.size, '/', x.size

    if blabla:
        print " Rejected points:"
        print xrej
        print yrej

    msk = pl.asarray([not i for i in msk])

    return msk, xrej, yrej
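A synthetic rejection run (a minimal sketch, assuming pl is pylab and cosmic_rejection above is in scope):

x = pl.arange(100.0)
y = pl.randn(100)
y[42] += 25.0  # inject a fake cosmic hit
msk, xrej, yrej = cosmic_rejection(x, y, 5)
print(xrej)  # -> [ 42.]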
Example #15
 def callback(self, verts):
     if len(self.Nxy) > 1:
         for j, series in enumerate(self.y):
             # ind = pl.np.nonzero(points_inside_poly(zip(self.x, series), verts))[0]
             ind = pl.np.nonzero(mpl_path(verts).contains_points(zip(self.x, series)))[0]
             for i in range(self.Nxy[1]):
                 if i in ind:
                     self.badpoints.append([i, j, self.y[j, i]])
                     self.axes.collections[j]._offsets[i,1] = pl.nan
                     self.y[j,i] = pl.nan
     else:
         cleanedpoints = self.axes.collections[0]._paths[0].vertices.tolist()
         # ind = pl.np.nonzero(points_inside_poly(cleanedpoints, verts))[0]
         ind = pl.np.nonzero(mpl_path(verts).contains_points(cleanedpoints))[0]
         removedcount = 0
         for i in range(len(cleanedpoints)):
             if i in ind:
                 self.badpoints.append([i, self.x[i], self.y[i]])
                 out = cleanedpoints.pop(i - removedcount)
                 self.axes.collections[0]._paths[0].vertices = pl.asarray(cleanedpoints)
                 original_indx = pl.find(self.x == out[0])
                 self.y[original_indx] = pl.nan
                 removedcount += 1
     self.canvas.draw_idle()
     self.canvas.widgetlock.release(self.lasso)
     del self.lasso
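Example #16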
    def load_map(self, data, isReversed):
        
        # sort
        data=list(data)
        data.sort( key=lambda x: x[1])
        data.sort( key=lambda x: x[2])
        data=py.asarray(data)
        
        # find nfocus
        firstfocus=data[0][0]
        for row in data[1:]:
            self._nfocus+=1
            if row[0] == firstfocus:
                break
        
        # extract lum data    
        for row in data:
            if isReversed:
                lum = row[3:][::-1]
            else:
                lum = row[3:]
            self._specList.append(Spectrum(wavelen=self._wavelen,lum=lum))

        # split specList into points, ie. [[z1, z1, z3],[z1,z2,z3]]
        self._specList=[self._specList[i:i+self._nfocus] for i in range(0,len(self._specList),self._nfocus)]
        self._z = (data[:,0][0:self._nfocus])
        
        assert len(self._z) == len(self._specList[0]), "len of focuses must match specList sublist length"        
Example #17
def autocrop_image(img, border=0):
    img = pylab.asarray(img)

    ih, iw = img.shape[:2]

    rows = (img == img[:, 0:1, :]).all(1).all(-1)
    y0 = 0
    y1 = ih
    while (y0 < y1 and rows[y0]):
        y0 += 1
    while (y1 > y0 and rows[y1 - 1]):
        y1 -= 1

    cols = (img == img[0:1, :, :]).all(0).all(-1)
    x0 = 0
    x1 = iw
    while (x0 < x1 and cols[x0]):
        x0 += 1
    while (x1 > x0 and cols[x1 - 1]):
        x1 -= 1

    if border:
        x0 = max(x0 - border, 0)
        x1 = min(x1 + border, iw)
        y0 = max(y0 - border, 0)
        y1 = min(y1 + border, ih)

    return img[y0:y1, x0:x1]
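A quick check on a synthetic frame (a minimal sketch, assuming pylab and the function above):

import pylab
img = pylab.zeros((40, 50, 3))
img[10:30, 15:35, :] = 1.0            # bright block on a uniform background
print(autocrop_image(img).shape[:2])  # expected: (20, 20)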
Example #18
def cosmic_rejection(x, y, n):
    '''Try to reject cosmic from a spectrum
    '''
    bla = True
    blabla = False

    if n == 0:
        print " Warning: sigma for rejection = 0. Take 0.1."
        n = 0.1

    msk = abs(y-y.mean()) > n * y.std(ddof=1)

    xrej = x[msk]
    yrej = y[msk]

    if bla:
        print " Rejection of points outside", n, "sigma around the mean."
        print " Number of rejected points:", xrej.size, '/', x.size

    if blabla:
        print " Rejected points:"
        print xrej
        print yrej

    msk = pl.asarray([not i for i in msk])

    return msk, xrej, yrej
Example #19
 def update(self, state, action, value):
     tile_indices = asarray(
         tiles(self.iht, self.numTilings,
               # scale each dimension onto 8 tiles across its range
               # (presumably Mountain Car: position in [-1.2, 0.5], velocity in [-0.07, 0.07])
               [8 * state[0] / (0.5 + 1.2), 8 * state[1] / (0.07 + 0.07)]),
         dtype=int) + (action * self.numTiles)
     for index in tile_indices:
         self.theta[index] += value
Example #20
def mk_grid(llx, ulx, nx, lly, uly, ny):
    # Get the Galaxy info
    #galaxies = mk_galaxy_struc()
    galaxies = pickle.load(open('galaxies.pickle', 'rb'))
    galaxies = [galaxy for galaxy in galaxies if galaxy.ston_I > 30.]
    galaxies = pyl.asarray([galaxy for galaxy in galaxies
                            if galaxy.ICD_IH < 0.5])

    # Make the low mass grid first
    x = pyl.asarray([galaxy.Mass for galaxy in galaxies])
    y = pyl.asarray([galaxy.ICD_IH * 100 for galaxy in galaxies])
    bins_x = pyl.linspace(llx, ulx, nx)
    bins_y = pyl.linspace(uly, lly, ny)

    grid = []

    for i in range(bins_x.size - 1):
        xmin = bins_x[i]
        xmax = bins_x[i + 1]
        for j in range(bins_y.size - 1):
            ymax = bins_y[j]
            ymin = bins_y[j + 1]

            cond = [
                cond1 and cond2 and cond3
                and cond4 for cond1, cond2, cond3, cond4 in zip(
                    x >= xmin, x < xmax, y >= ymin, y < ymax)
            ]

            grid.append(galaxies.compress(cond))

    return grid
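Example #21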
def calc_cma(arr):
    cum_sum_arr = plb.cumsum(arr)
    cum_avg_list = []
    for i, x in enumerate(cum_sum_arr, 1):
        if i > 1:
            cum_avg_list.append(x / i)
    cum_avg_arr = plb.asarray(cum_avg_list)
    print("The CMA of this SMA is: ", cum_avg_arr)
Example #22
 def display_images(index1, index2):
     pylab.figure(1)
     for ax, index in ([img1_ax, index1], [img2_ax, index2]):
         ax.imshow(pylab.asarray(afs[keys[index]].lf),
                   cmap=pylab.cm.gray,
                   interpolation='nearest')
         ax.set_title('Frame %d' % keys[index])
     pylab.draw()
Example #23
 def update(self, state, action, nstep_return):
     current_estimate = self.get_value(state, action)
     value = nstep_return - current_estimate
     scaled_state = np.multiply(
         np.asarray(state).flatten(), self.tiling_side_length)
     tile_indices = asarray(tiles(self.iht, self.num_tilings, scaled_state),
                            dtype=int) + (action * self.num_tiles)
     self.theta[tile_indices] += self.alpha * value
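Example #24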
    def get_value(self, state, action):
        scaled_state = np.multiply(np.asarray(state).flatten(), self.scale_factor)

        tile_indices = asarray(
            tiles(self.iht, self.num_tilings, scaled_state),
            dtype=int) + (action * self.num_tiles)

        return sum(self.theta[tile_indices])
Example #25
File: pt.py Project: jizhi/project_TL
def sigma2fromPk(c, r):
    """
    calculate sigma^2 from pk

    this function can be called with vectors or scalars, but always returns a vector
    """
    r = M.asarray(r)
    return 9.0 / r ** 2 * N.trapz(c.k * c.pk * sf.j1(M.outer(r, c.k)) ** 2, M.log(c.k)) / 2.0 / M.pi ** 2
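In the notation above, this evaluates the top-hat-window variance rewritten as a log-k integral (a worked identity, not text from the source):

\sigma^2(r) = \frac{1}{2\pi^2} \int k^2 P(k) \left[\frac{3 j_1(kr)}{kr}\right]^2 dk
            = \frac{9}{2\pi^2 r^2} \int k\, P(k)\, j_1^2(kr)\, d\ln k

which is why the code multiplies by 9.0 / r ** 2 and integrates c.k * c.pk * sf.j1(...) ** 2 against M.log(c.k).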
Example #26
File: pt.py Project: jizhi/project_TL
def xi2fromCambPk(c, r):
    """
    calculate 2pt corr. function from Camb instance (with its
    associated k,pk)

    this function can be called with vectors
    """
    r = M.asarray(r)
    return N.trapz(c.k ** 3 * c.pk * sf.j0(M.outer(r, c.k)), M.log(c.k)) / 2.0 / M.pi ** 2
Example #27
def HJacobian_at(x):
    values = {est_x: x.item(0), est_y: x.item(1)}
    values[s1_x] = radar.sensors[0][0]
    values[s1_y] = radar.sensors[0][1]
    values[s2_x] = radar.sensors[1][0]
    values[s2_y] = radar.sensors[1][1]
    values[s3_x] = radar.sensors[2][0]
    values[s3_y] = radar.sensors[2][1]
    return asarray(jacobian.evalf(subs=values))
Example #28
def gaussiannd(f, a, b, N=5):
    """ Multidimensional Gaussian quadrature.
    Here, f is the integrand, a and b are arrays giving the limits
    of the integral, and N is the number of abscissas. """

    a = pylab.asarray(a)
    b = pylab.asarray(b)
    ndim = a.size
    if a.size == 1:  # use normal 1d Gaussian quadrature
        return gaussian(f, a, b)
    fac = 0.5 * (b - a)
    mid = 0.5 * (b + a)
    s = 0.0
    # loop over all possible ndim-vectors of abscissas
    for xw in itertools.product(abscissas(N), repeat=ndim):
        x = pylab.array([x for (x, _) in xw])
        w = pylab.prod([w for (_, w) in xw])
        s += w * f(fac * x + mid)
    return pylab.prod(fac) * s
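Example #29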
    def get_next_states_values(self, state):
        scaled_state = np.multiply(np.asarray(state).flatten(), self.scale_factor)

        values = np.zeros(self.num_actions)
        for action in range(self.num_actions):
            tile_indices = asarray(
                tiles(self.iht, self.num_tilings, scaled_state),
                dtype=int) + (action * self.num_tiles)
            values[action] = np.sum(self.theta[tile_indices])
        return values
Example #30
def variance_of_laplacian(filename, heightDivisor, widthDivisor, counter):
    # Variables
    image = Image.open(filename)  # open file
    (rWidth,
     rHeight) = image.size  # returns width, height or x, y or cols, rows
    image.close()  # close file
    totalImageRows = pl.arange(rHeight / heightDivisor)
    totalImageColumns = pl.arange(rWidth / widthDivisor)
    numRows_MiniMatrix = pl.arange(heightDivisor)
    numCols_MiniMatrix = pl.arange(widthDivisor)
    miniMatrix = pl.zeros(
        (heightDivisor, widthDivisor
         ))  # heightDivisor x widthDivisor Matrix to copy elements to
    varianceMatrix = pl.zeros((rHeight / heightDivisor, rWidth / widthDivisor))
    Laplacian_Kernel = (pl.array([[0., -1., 0.], [-1., 4., -1.], [0., -1., 0.]
                                  ])) * (1. / 60)

    # 1. Convert image to matrix.
    toBeConverted = pil.Image.open(filename)
    # resize (columns, width)
    toBeConverted = toBeConverted.resize(
        [(rWidth / widthDivisor) * widthDivisor,
         (rHeight / heightDivisor) * heightDivisor],
        resample=Image.LANCZOS)  # resize using LANCZOS filtering
    # truncating the width and height so that they're divisible by heightDivisor & widthDivisor
    imageMatrix = pl.asarray(toBeConverted.convert(
        'L'))  # convert image to greyscale; return matrix
    toBeConverted.close()  # close file

    # 2. Split Image into sub-matrices, each of size heightDivisor x widthDivisor. For each
    #    heightDivisor x widthDivisor sub-matrix, convolve with the kernel. Calculate the variance
    #    of this convolution. Place variance in a (rHeight / heightDivisor) x (rWidth / widthDivisor) matrix.

    print "\t[", counter, "] Convolving subdivisions of image with Laplacian Kernel... Calculating Variance... ",
    for subset_of_rows in totalImageRows:  # TOTAL Image Matrix
        for subset_of_columns in totalImageColumns:
            image_row = subset_of_rows * heightDivisor  # keeps track of larger matrix's row index to copy from
            image_col = subset_of_columns * widthDivisor  # keeps track of larger matrix's column index to copy from
            for row in numRows_MiniMatrix:
                for col in numCols_MiniMatrix:
                    miniMatrix[row][col] = imageMatrix[image_row +
                                                       row][image_col + col]
            # 3. Convolve part of the image with the Laplacian kernel
            Convolve = signal.fftconvolve(miniMatrix,
                                          Laplacian_Kernel,
                                          mode='full')
            # 4. Compute the variance of the convolution.
            Variance = pl.var(Convolve)
            # 5. Store variance in entry of varianceMatrix
            varianceMatrix[subset_of_rows][subset_of_columns] = Variance
    # 6. return the varianceMatrix
    print "Done."
    return varianceMatrix
Example #31
def astausgleich(ab2org, mn2org, rhoaorg):
    """shifts the branches of a dc sounding to generate a matching curve."""
    ab2 = P.asarray(ab2org)
    mn2 = P.asarray(mn2org)
    rhoa = P.asarray(rhoaorg)
    um = P.unique(mn2)
    for i in range(len(um) - 1):
        r0, r1 = [], []
        ac = P.intersect1d(ab2[mn2 == um[i]], ab2[mn2 == um[i + 1]])
        for a in ac:
            r0.append(rhoa[(ab2 == a) * (mn2 == um[i])][0])
            r1.append(rhoa[(ab2 == a) * (mn2 == um[i + 1])][0])

        if len(r0) > 0:
            fak = P.mean(P.array(r0) / P.array(r1))
            print(fak)
            if P.isfinite(fak) and fak > 0.:
                rhoa[mn2 == um[i + 1]] *= fak

    return rhoa  # formerly pg as vector
Example #32
def astausgleich(ab2org, mn2org, rhoaorg):
    """shifts the branches of a dc sounding to generate a matching curve."""
    ab2 = P.asarray(ab2org)
    mn2 = P.asarray(mn2org)
    rhoa = P.asarray(rhoaorg)
    um = P.unique(mn2)
    for i in range(len(um) - 1):
        r0, r1 = [], []
        ac = P.intersect1d(ab2[mn2 == um[i]], ab2[mn2 == um[i + 1]])
        for a in ac:
            r0.append(rhoa[(ab2 == a) * (mn2 == um[i])][0])
            r1.append(rhoa[(ab2 == a) * (mn2 == um[i + 1])][0])

        if len(r0) > 0:
            fak = P.mean(P.array(r0) / P.array(r1))
            print(fak)
            if P.isfinite(fak) and fak > 0.:
                rhoa[mn2 == um[i + 1]] *= fak

    return rhoa  # formerly pg as vector
Example #33
def trapz(y, x=None, ax=-1, method=pl.add.reduce):
    """trapz(y,x=None,ax=-1) integrates y along the given dimension of
    the data array using the trapezoidal rule.

    you can call it with method=pl.add.accumulate to yield partial sums
    """
    y = pl.asarray(y)
    if x is None:
        d = 1.0
    else:
        d = pl.diff(x, axis=ax)
    nd = len(y.shape)
    s1 = nd * [slice(None)]
    s2 = nd * [slice(None)]
    s1[ax] = slice(1, None)
    s2[ax] = slice(None, -1)

    ans = method(0.5 * d * (y[tuple(s1)] + y[tuple(s2)]), ax)
    return ans
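The docstring's two modes, checked on a known integral (a minimal sketch, assuming pl is pylab and trapz above is in scope):

x = pl.linspace(0, pl.pi, 101)
y = pl.sin(x)
print(trapz(y, x))                                 # full integral of sin on [0, pi], ~2.0
print(trapz(y, x, method=pl.add.accumulate)[-1])   # partial sums; the last entry is again ~2.0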
Example #34
 def matrix_show(arr, nx):
     try:  # import MatPlotLib if available
         from pylab import imshow, show, cm, asarray
     except ImportError:
         return "MatPlotLib library (http://matplotlib.sourceforge.net) cannot be imported."
     else:
         matrix = [arr[i:i + nx] for i in xrange(0, len(arr), nx)][::-1]
         matrix = asarray(matrix, dtype="uint8")
         imshow(matrix, cmap=cm.gray, interpolation="nearest")
         show()
         return "MatPlotLib structure plot shown successfully."
Example #35
 def matrix_show(arr, nx):
     try: # import MatPlotLib if available
         from pylab import imshow, show, cm, asarray
     except ImportError:
         return "MatPlotLib library (http://matplotlib.sourceforge.net) cannot be imported."
     else:
         matrix = [arr[i:i+nx] for i in xrange(0, len(arr), nx)][::-1]
         matrix = asarray(matrix, dtype="uint8")
         imshow(matrix, cmap=cm.gray, interpolation="nearest")
         show()
         return "MatPlotLib structure plot shown successfully."
Example #36
 def calcunc(self,tdDatas):
     #not used anymore, older version, should we remove it???
     #tdDatas is a np array of tdData measurements
     if tdDatas.shape[0]==1:
         repeatability=py.zeros((len(tdDatas[0,:,0]),2))
     else:
         repeatability=py.std(py.asarray(tdDatas[:,:,1:3]),axis=0, ddof = 1)/py.sqrt(self.numberOfDataSets)
     #this line is wrong
     elnNoise=tdDatas[0,:,3:]
     uncarray = py.sqrt(repeatability**2 + elnNoise**2)
     
     return uncarray
Example #37
File: pt.py Project: jizhi/project_TL
def xi2fromPk(k, pk, r):
    """
    calculate 2pt corr. function from k, p(k).
    It doesn't seem to work particularly well, though.

    this function can be called with vectors
    """
    r = M.asarray(r)

    print len(k), len(pk), len(r)

    return N.trapz(k ** 3 * pk * sf.j0(M.outer(r, k)), M.log(k)) / 2.0 / M.pi ** 2
Example #38
File: pt.py Project: jizhi/project_TL
def wPk(c, r, w):
    """
    convolve Pk with an arbitrary window function w(kr)
    
    int k^2 P(k)w(kr) dk/2/pi^2

    e.g., the previous two functions can be realized as
    wPk(c,r,j0)
    wPk(c,r,lambda x: (3*pt.sf.j1(x)/x)**2))
    """
    r = M.asarray(r)
    return N.trapz(c.k ** 3 * c.pk * w(M.outer(r, c.k)), M.log(c.k)) / 2.0 / M.pi ** 2
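All three pt.py helpers above are the same log-spaced quadrature; substituting dk = k d(ln k) gives (a worked identity under the conventions used above):

\xi_w(r) = \frac{1}{2\pi^2} \int k^2 P(k)\, w(kr)\, dk
         = \frac{1}{2\pi^2} \int k^3 P(k)\, w(kr)\, d\ln k

so wPk(c, r, sf.j0) reproduces xi2fromCambPk and wPk(c, r, lambda x: (3*sf.j1(x)/x)**2) reproduces sigma2fromPk, as the docstring notes.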
Example #39
def plot_timeseries(data, style='-', **kwargs):
    try:
        step = kwargs.pop('step')
        ts = data.reduce_avg(data.begin - data.begin % step, step)
    except KeyError:
        ts = data

    x = __x_from_ts(ts)
    line = pylab.plot(x, pylab.asarray(ts), style, **kwargs)[0]
    ax = pylab.gca()
    ax.xaxis_date()
    return line
Example #40
 def __init__(self,
              cells,
              value_function,
              cmap=pylab.cm.jet,
              hold=True,
              vmin=None,
              vmax=None,
              **kwargs):
     self.cells = cells
     self.__f = value_function
     was_interactive = pylab.isinteractive()
     if was_interactive: pylab.ioff()
     geos = []
     self.maxvalue = -1e300 if vmax is None else vmax
     self.minvalue = 1e300 if vmin is None else vmin
     self.polygons = {}
     # Generate image array, which is filled with generated values from self.f
     self._A = []
     for cell in cells:
         shape = geoms[cell]
         if hasattr(shape, "geoms"):
             shapes = shape.geoms
         else:
             shapes = [shape]
          value = self.__f(cell)  # the value_function stored above
         self._A.append(value)
         if vmax is None:
             self.maxvalue = max(value, self.maxvalue)
         if vmin is None:
             self.minvalue = min(value, self.minvalue)
         for s in shapes:
             geos.append((cell, s, value))
     if self.minvalue >= self.maxvalue: self.minvalue = self.maxvalue - 1
     for cell, s, v in geos:
         if hasattr(s, "exterior"):
             s = s.exterior
         c = cmap(
             float(v - self.minvalue) /
             float(self.maxvalue - self.minvalue))
         a = pylab.asarray(s)
         self.polygons[cell] = pylab.fill(a[:, 0],
                                          a[:, 1],
                                          fc=c,
                                          hold=hold,
                                          **kwargs)[0]
         hold = 1
     pylab.axis('equal')
     norm = pylab.matplotlib.colors.Normalize(self.minvalue, self.maxvalue)
     pylab.matplotlib.cm.ScalarMappable.__init__(self, norm, cmap)
     if was_interactive:
         pylab.draw()
         pylab.ion()
Example #41
    def get_next_states_values(self, state):
        scaled_state = []
        for i in range(len(state)):
            scaleFactor = self.numTilings / self.state_space_range[i]
            scaled_state.append(scaleFactor * state[i])

        values = zeros(self.numActions)
        for action in range(self.numActions):
            tile_indices = asarray(tiles(self.iht, self.numTilings,
                                         scaled_state),
                                   dtype=int) + (action * self.numTiles)
            values[action] = sum(self.theta[tile_indices])
        return values
Example #42
    def calculaten(self,H,l):
        #this calculates n for a given transfer function H and a given thickness l
    
        #calculate the initial values
        inits=py.asarray(self.calculateinits(H,l))
        
        res=[] #the array of refractive index
        vals=[] # the array of error function values
        bnds=((1,None),(0,None)) #not used at the moment, with SLSQP bnds can be introduced
        nums=len(H[:,0])
        #minimize the deviation of H_theory to H_measured for each frequency
        for i in range(nums):
#            t=minimize(self.error_func,[inits[0,i],inits[1,i]],args=(H[i,:2],l), method='SLSQP',\
#            bounds=bnds, options={'ftol':1e-9,'maxiter':2000, 'disp': False})
            t=minimize(self.error_func,[inits[0,i],inits[1,i]],args=(H[i,:3],l), method='Nelder-Mead',
            options={'xtol': 1e-6,'disp':False})
            res.append(t.x[0]-1j*t.x[1])
            vals.append(t.fun)
        n=py.asarray(res)
        #self.n is a 5xlengthf array, frequency,n_real,n_imag,n_smoothed_real,n_smoothed_imag
        self.n=py.column_stack((H[:,0],n,n))
        return n
Example #43
 def plot(self):
     self.i = 0
     self.j = 0
     self.lms = []
     self.skipinc = 4
     self.numsections = self.pingmax.shape[2]
     if self.numsections > 60:
         self.numsections = 60
     for section in range(0, self.numsections, self.skipinc):
         minpower = pl.asarray(self.settings['power']).min()
         maxpower = pl.asarray(self.settings['power']).max()
         mags = self.pingmax[:,:,section]
         maskedmags = pl.ma.array(mags, mask = pl.isnan(mags))
         maxintensity = maskedmags.max()
         minintensity = maskedmags.min()
         self.fig = pl.figure()
         self.ax = self.fig.add_subplot(111, xlim=(minpower - 10,maxpower + 10), ylim=(minintensity - 10,maxintensity + 10), autoscale_on=False)
         self.ax.set_xlabel('Reported Power (dB)')
         self.ax.set_ylabel('Corrected 20*log10(received magnitude)')
         title = 'Range bin ' + str(section)
         self.ax.set_title(title)
         self.ax.grid()
         self.lms.append(LassoManager(self.ax, self.settings['power'], maskedmags))
         pl.draw()
Example #44
def montecarlo(f, a, b, eps=1e-3, nmin=100, nmax=1000000):
    """ Monte Carlo integration.
    Here, f is the integrand, a and b are arrays giving the limits
    of the integral, and eps is the desired accuracy.
    The parameters nmin and nmax specify the minimum and
    maximum number of random points to use. """

    a = pylab.asarray(a)
    b = pylab.asarray(b)
    vol = pylab.prod(b - a)
    s = 0.0  # running average of f(x)
    ssq = 0.0  # running sum of (f(x)-s)**2
    n = 0
    while n < nmax:
        n += 1
        x = pylab.uniform(a, b)
        fx = f(x)
        d = fx - s
        s += d / n
        ssq += d * (fx - s)
        err = ssq**0.5 / n  # assume n-1 ~= n
        if n > nmin and err < eps * abs(s):
            break
    return vol * s
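A smoke test on a known integral, the unit square with f(x, y) = x*y and exact value 1/4 (a minimal sketch; pylab.uniform and pylab.prod are the same calls the function itself makes):

import pylab
est = montecarlo(lambda x: x[0] * x[1], [0.0, 0.0], [1.0, 1.0], eps=1e-2)
print(est)  # ~0.25, to within the requested relative accuracy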
Example #45
 def findLintelli(self):
     #find the frequency with the most amplitude
     fmax=self.H.fdref.getmaxfreq()
     
     #overthink once more the cropping length
     n=self.n_estimated
     #oscillation period can be calculated from H data!
     f_span=c/2*1/self.l_estimated*1/n #(this should be the length of the oscillation)
     #restrict H to only 5 oscillations
     H_small=self.H.getcroppedData(self.H.fdData,fmax-f_span*1,fmax+f_span*4)
     py.figure(33)
     #minimize quasispace/totalvariation value
     t=minimize(self.errorL,self.userthickness,args=((py.asarray(H_small),)),\
     method='Nelder-Mead', options={'xtol':1e-6,'disp': False})
     return t.x[0]
Example #46
    def easy_parallize(f, sequence):
        # I didn't see gains with .dummy; you might
        from multiprocessing import Pool
        pool = Pool(processes=8)
        #from multiprocessing.dummy import Pool
        #pool = Pool(16)

        # f is given sequence. Guaranteed to be in order
        result = pool.map(f, sequence)
        cleaned = [x for x in result if x is not None]
        cleaned = asarray(cleaned)
        # not optimal but safe
        pool.close()
        pool.join()
        return cleaned
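Example #47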
    def easy_parallize(f, sequence):
        # I didn't see gains with .dummy; you might
        from multiprocessing import Pool
        pool = Pool(processes=8)
        #from multiprocessing.dummy import Pool
        #pool = Pool(16)

        # f is given sequence. guaranteed to be in order
        result = pool.map(f, sequence)
        cleaned = [x for x in result if x is not None]
        cleaned = asarray(cleaned)
        # not optimal but safe
        pool.close()
        pool.join()
        return cleaned
Example #48
    def _redraw(self, db, tofile=0, eta=None):
        """ redraw the updated grid interactively """
        import pylab as p

        if len(db) <= 3 or not self.plotflag:
            return
        begin_level = round(max(map(lambda x: x[2], db))) - 3
        step_size = 0.25
        nContours = 25
        suffix = ""
        if eta is not None:
            suffix = " (ETA: %5.2f min)" % eta

        def cmp(x, y):
            if x[0] < y[0]:
                return -1
            if x[0] > y[0]:
                return 1
            if x[1] > y[1]:
                return -1
            if x[1] < y[1]:
                return 1
            return 0

        db.sort(cmp)
        dbarr = p.asarray(db)
        # reconstruct grid: array is ordered along first and second dimension
        x = dbarr[:, 0]
        dimy = len(x[x == x[0]])
        dimx = x.size / dimy
        print "plotting: ", dimx, dimy
        x = x.reshape(dimx, dimy)
        y = dbarr[:, 1]
        y = y.reshape(dimx, dimy)
        z = dbarr[:, 2].reshape(dimx, dimy)

        # plot using manual double buffer
        p.ioff()
        p.clf()
        p.contourf(x, y, z, nContours)
        p.hsv()
        p.colorbar()
        p.xlim(self.usermin[0], self.usermax[0])
        p.ylim(self.usermin[1], self.usermax[1])
        p.xlabel(r"$\rm{log}_2(C)$")
        p.ylabel(r"$\rm{log}_2(\gamma)$")
        p.ion()
        p.draw_if_interactive()
Example #49
def get_colours(n):
    """ Return n pastel colours. """
    from matplotlib.colors import colorConverter
    from pylab import (asarray, linspace)

    base = asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    if n <= 3:
        return base[0:n]
    # how many new colours do we need to insert between
    # red and green and between green and blue?
    needed = (((n - 3) + 1) // 2, (n - 3) // 2)
    colours = []
    for start in (0, 1):
        for x in linspace(0, 1, needed[start] + 2):
            colours.append((base[start] * (1.0 - x)) + (base[start + 1] * x))
    return [pastel(c) for c in colours[0:n]]
Example #50
def get_colours(n):
    """ Return n pastel colours. """
    from matplotlib.colors import colorConverter
    from pylab import asarray, linspace

    base = asarray([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    if n <= 3:
        return base[0:n]
    # how many new colours do we need to insert between
    # red and green and between green and blue?
    needed = (((n - 3) + 1) // 2, (n - 3) // 2)
    colours = []
    for start in (0, 1):
        for x in linspace(0, 1, needed[start] + 2):
            colours.append((base[start] * (1.0 - x)) + (base[start + 1] * x))
    return [pastel(c) for c in colours[0:n]]
Example #51
 def _removeTimeShift(self,tdDatas):
     #not sure if needed, maybe we want to correct the rawdata by shifting the maxima on top of each other
     #the indices of the maxima        
     #takes at the moment only the X Channel data and corrects it (safer!)
     peak_pos=[]
     for tdData in tdDatas:
         time_max_raw=tdData[py.argmax(tdData[:,1]),0]
         thisPeakData=self.getShorterData(tdData,time_max_raw-0.5e-12,time_max_raw+0.5e-12)
         thisPeakData=self.getInterData(thisPeakData,len(thisPeakData[:,0])*20,thisPeakData[0,0],thisPeakData[-1,0],'cubic')
         peak_pos.append(thisPeakData[py.argmax(thisPeakData[:,1]),0])
     
     peak_pos=py.asarray(peak_pos)
     mp=py.mean(peak_pos)
     for i in range(len(tdDatas)):
         tdDatas[i][:,0]-=(peak_pos[i]-mp)
     return tdDatas,py.std(peak_pos)
Example #52
def get_data(ifname):
    '''Get spectrum for a given input filename
    '''

    bla = False

    i = ifname.rfind('.')

    if i == -1:
        name = ifname
        ext = ''
    else:
        name = ifname[:i]
        ext = ifname[i + 1:]

    if bla:
        print name, ext

    if ext in ['dat', 'txt', 'convol', 'spec', 'tmp', 'cvl', '']:
        data = pl.loadtxt(ifname)
    elif ext == 'bin':
        data = []
        ifile = open(ifname, 'rb')
        while True:
            record = ifile.read(16)
            if len(record) != 16:
                break
            record = struct.unpack('dd', record)
            data.append(record)
        data = pl.asarray(data)
    elif ext in ['fits', 'fit']:
        data = load_fits(ifname, False, ext)
    else:
        try:
            data = pl.loadtxt(ifname)
        except Exception:
            print "Unknown format of input spectra."
            quit(1)

    # Remove possible nan in the input data
    idx, = pl.where(data[:, 1] == data[:, 1])  # pl.where returns a tuple; the comma unpacks it
    data = data[idx, :]
    idx, = pl.where(data[:, 0] == data[:, 0])
    data = data[idx, :]

    return data
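Example #53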
 def get_NVratio(self,maxwavelen=638,*args,**kwargs):
     """
     Finds best focus at given wavelength for each point, 
     
     maxwavelen: int or float, optional
         Default is 638nm, maximizing to NVm ZPL
     """
     NVvalues=[]
     if self._focusedSpec==[]:
         indexList = self.find_focus(maxwavelen)
         
     for point in self._focusedSpec:
         NVvalues.append(point.get_NVratio(*args,**kwargs))
     
     NVvalues = py.asarray(NVvalues)
     
     return NVvalues
Example #54
def get_data(ifname):
    '''Get spectrum for a given input filename
    '''

    bla = False

    i = ifname.rfind('.')

    if i == -1:
        name = ifname
        ext = ''
    else:
        name = ifname[:i]
        ext = ifname[i+1:]

    if bla:
        print name, ext

    if ext in ['dat', 'txt', 'convol', 'spec', 'tmp', 'cvl', '']:
        data = pl.loadtxt(ifname)
    elif ext == 'bin':
        data = []
        ifile = open(ifname, 'rb')
        while True:
            record = ifile.read(16)
            if len(record) != 16:
                break
            record = struct.unpack('dd', record)
            data.append(record)
        data = pl.asarray(data)
    elif ext in ['fits', 'fit']:
        data = load_fits(ifname, False, ext)
    else:
        try:
            data = pl.loadtxt(ifname)
        except Exception:
            print "Unknown format of input spectra."
            quit(1)

    # Remove possible nan in the input data
    idx, = pl.where(data[:, 1] == data[:, 1]) # pl.where returns a tuple; the comma unpacks it
    data = data[idx, :]
    idx, = pl.where(data[:, 0] == data[:, 0])
    data = data[idx, :]

    return data
Example #55
 def finddepth(self, showdepth = False):
     """find all the 7006 ranges that have both good colinearity and
     brightness, and bin them and use the bin with the maximum number as
     the depth for that beam."""
     print "Extracting depth information...",
     depth = pl.zeros((self.num7006, self.numbeams))
     depth[:,:] = pl.nan
     flagmask = pl.zeros(self.numbeams, dtype=int)
     flagmask[:] = 3
     for i in xrange(self.num7006):
         self.calfile.getrecord(7006, i)
         pingflags = pl.asarray(self.calfile.packet.subpack.data[1, :], dtype=int)
         indx = pl.find((flagmask & pingflags) == 3)
         depth[i, indx] = self.calfile.packet.subpack.data[0,indx]    
     print "estimating the depth...",
     depthmax = pl.nanmax(depth)
     self.depthbins = pl.arange(0, depthmax + self.windowlen, self.windowlen)
     self.depthestimate = pl.zeros(self.numbeams)
     for j in xrange(self.numbeams):
         h = pl.histogram(depth[:, j], bins = self.depthbins)
         self.depthestimate[j] = h[1][h[0].argmax()]
     print "depth estimation completed, binning beams."
     # sort beams with similar ranges into a list
     self.beamlisting = []
     for bin in self.depthbins:
         indx = pl.find((self.depthestimate < bin + self.windowlen) 
             & (self.depthestimate >= bin - self.windowlen))
         if len(indx) > 0:
             self.beamlisting.append(indx)
     if showdepth:
         fig = pl.figure()
         ax = fig.add_subplot(111)
         ax.plot(depth.T, 'x')
         ax.plot(self.depthestimate,'o')
         ax.set_xlabel('Beam Number')
         ax.set_ylabel('Time (seconds)')
         ax.set_title('Bottom Detections')
         pl.xlim((0,self.numbeams))
         ymin, ymax = pl.ylim()
         pl.ylim((ymax, ymin))
         ax.grid()
         pl.draw()
Example #56
    def _redraw(self, db, tofile=0, eta=None):
        """ redraw the updated grid interactively """
        import pylab as p
        if len(db) <= 3 or not self.plotflag: return
        begin_level = round(max(map(lambda x: x[2], db))) - 3
        step_size = 0.25
        nContours = 25
        suffix = ''
        if eta is not None:
            suffix = " (ETA: %5.2f min)" % eta

        def cmp(x, y):
            if x[0] < y[0]: return -1
            if x[0] > y[0]: return 1
            if x[1] > y[1]: return -1
            if x[1] < y[1]: return 1
            return 0

        db.sort(cmp)
        dbarr = p.asarray(db)
        # reconstruct grid: array is ordered along first and second dimension
        x = dbarr[:, 0]
        dimy = len(x[x == x[0]])
        dimx = x.size / dimy
        print 'plotting: ', dimx, dimy
        x = x.reshape(dimx, dimy)
        y = dbarr[:, 1]
        y = y.reshape(dimx, dimy)
        z = dbarr[:, 2].reshape(dimx, dimy)

        # plot using manual double buffer
        p.ioff()
        p.clf()
        p.contourf(x, y, z, nContours)
        p.hsv()
        p.colorbar()
        p.xlim(self.usermin[0], self.usermax[0])
        p.ylim(self.usermin[1], self.usermax[1])
        p.xlabel(r'$\rm{log}_2(C)$')
        p.ylabel(r'$\rm{log}_2(\gamma)$')
        p.ion()
        p.draw_if_interactive()
Example #57
def norm_hist_bins(y, bins=10, normed='height'):
    """Just like the matplotlib mlab.hist, but can normalize by height.

    normed can be 'area' (produces matplotlib behavior, area is 1), 
    any False value (no normalization), or any True value (normalization).

    Original docs from matplotlib:

    Return the histogram of y with bins equally sized bins.  If bins
    is an array, use the bins.  Return value is
    (n,x) where n is the count for each bin in x

    If normed is False, return the counts in the first element of the
    return tuple.  If normed is True, return the probability density
    n/(len(y)*dbin)
    
    If y has rank>1, it will be raveled
    Credits: the Numeric 22 documentation
    """
    y = asarray(y)
    if len(y.shape)>1: y = ravel(y)

    if not iterable(bins):
        ymin, ymax = min(y), max(y)
        if ymin==ymax:
            ymin -= 0.5
            ymax += 0.5

        if bins==1: bins=ymax
        dy = (ymax-ymin)/bins
        bins = ymin + dy*arange(bins)
    n = searchsorted(sort(y), bins)
    n = diff(concatenate([n, [len(y)]]))
    if normed:
        if normed == 'area':
            db = bins[1]-bins[0]
        else:
            db = 1.0
        return 1/(len(y)*db)*n, bins
    else:
        return n, bins
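A quick comparison of the three normalizations (a minimal sketch, assuming the bare asarray/searchsorted/randn calls above come from a module-level from pylab import *):

y = randn(1000)
counts, edges = norm_hist_bins(y, bins=20, normed=False)   # raw counts
fracs, edges = norm_hist_bins(y, bins=20)                  # default 'height': fractions sum to 1
dens, edges = norm_hist_bins(y, bins=20, normed='area')    # density: area sums to 1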
Example #58
def norm_hist_bins(y, bins=10, normed='height'):
    """Just like the matplotlib mlab.hist, but can normalize by height.

    normed can be 'area' (produces matplotlib behavior, area is 1), 
    any False value (no normalization), or any True value (normalization).

    Original docs from matplotlib:

    Return the histogram of y with bins equally sized bins.  If bins
    is an array, use the bins.  Return value is
    (n,x) where n is the count for each bin in x

    If normed is False, return the counts in the first element of the
    return tuple.  If normed is True, return the probability density
    n/(len(y)*dbin)
    
    If y has rank>1, it will be raveled
    Credits: the Numeric 22 documentation
    """
    y = asarray(y)
    if len(y.shape)>1: y = ravel(y)

    if not iterable(bins):
        ymin, ymax = min(y), max(y)
        if ymin==ymax:
            ymin -= 0.5
            ymax += 0.5

        if bins==1: bins=ymax
        dy = (ymax-ymin)/bins
        bins = ymin + dy*arange(bins)
    n = searchsorted(sort(y), bins)
    n = diff(concatenate([n, [len(y)]]))
    if normed:
        if normed == 'area':
            db = bins[1]-bins[0]
        else:
            db = 1.0
        return 1/(len(y)*db)*n, bins
    else:
        return n, bins
Example #59
    def __init__( self, mua_data, dt, lfp_data=None,
                  z_start=0., z_space=0.1, casename='', rneg_factor=1E6,
                  tstim=0, sub_at='base',
                  verbose=False ):
        '''
        This function ...
    
        Arguments
        --------

        Keyword arguments
        -----------------

        '''
        
        if verbose:
            msg = 'This is class *LPA_Signal* in *pyLPA* module'
            print msg
        
        if not oopt_import:
            print oopt_import_msg
            
        self.nstim, self.ntime, self.nchan = pl.asarray(mua_data).shape

        self.dt = dt
        self.z_start = z_start
        self.z_space = z_space
        self.el_coords = pl.arange(z_start, z_start+self.nchan*z_space, z_space)
        self.rneg_factor = rneg_factor
        self.tstim = tstim
        self.sub_at = sub_at
        self.verbose = verbose

        self.tstim_idx = int(pl.floor(self.tstim/self.dt))
        
        # create matrices and calculate variances
        self.importDataset(mua_data, 'MUA')
        if lfp_data is not None:
            self.importDataset(lfp_data, 'LFP')
Example #60
def _createMmat( M_params, el_coords ):
    '''
    This function ...
    
    Arguments
    --------

    Keyword arguments
    -----------------
    '''
    # Assume now that all measures are real measures in mm
    M_params = pl.asarray(M_params)
        
    nchan = el_coords.size
    
    npop = M_params.shape[0]/3
    z0=M_params[:npop]
    a=M_params[npop:2*npop]
    b=M_params[2*npop:]                
        
    # Create the Mmat matrix
    Mmat = pl.zeros((nchan, npop))
            
    # Update the basis matrix, Mmat
    for ipop in xrange(npop):
        # tmpvec is the distance between z0[ipop] and all electrode contacts
        tmpvec = abs(el_coords - z0[ipop])
        # Set all entries that are closer than a[ipop] to 1
        Mmat[pl.find(tmpvec < a[ipop]), ipop] = 1
        # Find the entries that are on the oblique part of the profile and set
        # the right value
        tmpvec = tmpvec - a[ipop]
        cond1 = pl.find(tmpvec >= 0)
        cond2 = pl.find(tmpvec < b[ipop])
        isect = [x for x in cond2 if x in cond1]
        Mmat[isect, ipop] = 1 - 1 / b[ipop] * (tmpvec[isect])
        
    return Mmat