Code Example #1
def linename_to_restwl(linelistfile=tablepath + 'linelist.txt',
                       outfile=tablepath + 'newlinelist.txt'):

    lines = readcol(linelistfile, fsep='|', twod=False, comment="#")
    outf = open(outfile, 'w')

    for line in transpose(lines):
        name = line[0]
        jre = re.compile('\(([0-9]*)\)').search(name)
        if jre == None:
            print >> outf, "%10s|%10s" % (line[0], line[1])
            continue
        else:
            jl = int(jre.groups()[0])
        if name[4] == 'S':
            ju = jl + 2
        elif name[4] == 'Q':
            ju = jl
        elif name[4] == 'O':
            ju = jl - 2
        else:
            print >> outf, "%10s|%10s" % (line[0], line[1])
            continue
        vu = int(name[0])
        vl = int(name[2])
        rwl = restwl(vu, vl, ju, jl)
        if rwl == 0:
            rwl = float(line[1])
        print >> outf, "%10s|%10.8f" % (name, rwl)
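
If agpy's readcol is not available, the pipe-separated table written above can also be read back with the standard library alone. A minimal sketch, assuming the two-column name|wavelength layout produced by the format strings in linename_to_restwl (the file name is a placeholder for the outfile default above):

import csv

def read_newlinelist(filename='newlinelist.txt'):
    # 'newlinelist.txt' stands in for the tablepath + 'newlinelist.txt' written above
    rows = []
    with open(filename) as fh:
        for name, wl in csv.reader(fh, delimiter='|'):
            rows.append((name.strip(), float(wl)))
    return rows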
Code Example #2
File: h2fit.py  Project: Fade89/agpy
def linename_to_restwl(linelistfile = tablepath+'linelist.txt',outfile=tablepath+'newlinelist.txt'):

    lines = readcol(linelistfile,fsep='|',twod=False,comment="#")
    outf = open(outfile,'w')

    for line in transpose(lines):
        name = line[0]
        jre = re.compile('\(([0-9]*)\)').search(name)
        if jre == None:
            print >>outf, "%10s|%10s" % (line[0],line[1])
            continue
        else:
            jl = int( jre.groups()[0] )
        if name[4] == 'S':
            ju = jl + 2
        elif name[4] == 'Q': 
            ju = jl
        elif name[4] == 'O':
            ju = jl - 2
        else:
            print >>outf, "%10s|%10s" % (line[0],line[1])
            continue
        vu = int( name[0] )
        vl = int( name[2] )
        rwl = restwl(vu,vl,ju,jl)
        if rwl == 0:
            rwl = float(line[1])
        print >>outf,"%10s|%10.8f" % (name,rwl)
Code Example #3
File: h2fit.py  Project: Fade89/agpy
def readspec(image,noiseimage,
    linelistfile = '/Users/adam/work/IRAS05358/code/linelist.txt',
    path_obs='', #'/Users/adam/observations/IRAS05358/UT090108/',
    noiseaperture=[0,10],
    aperture=[],
    nameregex='2-1 S\(1\)|1-0 S\([1379028]\)|1-0 Q\([1234]\)|3-2 S\([35]\)|4-3 S\(5\)',
    apname='',
    vlsrcorr=0):

    regex = re.compile(nameregex)
    im = pyfits.open(path_obs+image)
    noiseim = pyfits.open(path_obs+noiseimage)
    wlA = im[0].header['CRVAL1'] + im[0].header['CD1_1'] * ( arange(im[0].data.shape[1]) - im[0].header['CRPIX1'] + 1)
    data = im[0].data
    atmospec = median(noiseim[0].data[noiseaperture[0]:noiseaperture[1]],axis=0)

    countsperflux = 2.25e18
    stdatmo = noiseim[0].data[noiseaperture[0]:noiseaperture[1]].std(axis=0)            # std. dev. of non-backsubtracted data
    poisserr = sqrt(abs(noiseim[0].data).mean(axis=0) * countsperflux) / countsperflux  # Poisson noise (very approximate correction)
    errspec = sqrt( stdatmo**2 + 2*poisserr**2 )                 # poisson statistics - once for estimation of the noise, once for the subtraction
    errspec /= atmotrans_vect(wlA)**2                                                  # Weight by inverse of atmospheric transmission^2

    lines = readcol(linelistfile,fsep='|',twod=False)
    lines[1] = asarray(lines[1],dtype='float')*1e4 # convert microns to angstroms

    specsegments=[]
    for line in transpose(lines):
        wl = float(line[1])
        name = line[0]
        if wl > wlA.min() and wl < wlA.max() and regex.search(name) != None:
            closest = argmin(abs(wlA-wl))
            minind = closest-7
            maxind = closest+7

            if len(aperture) != 0:
                savedata = data[aperture[0]:aperture[1],minind:maxind].sum(axis=0)
            else:
                savedata = data[:,minind:maxind]
            specsegments.append({
                'name':name,
                'apname':apname,
                'linewl':wl,
                'index':closest,
                'minind':minind,
                'maxind':maxind,
                'vlsrcorr':vlsrcorr,
                'wavelength':wlA[minind:maxind],
                'data':savedata,
                'noback':atmospec[minind:maxind],
                'err':errspec[minind:maxind],
                'model':data[0,minind:maxind]*0
#                'smoothdata':convolve( data[:,minind:maxind] , hanning(3) , 'same')
                })

    print "Done finding lines"            

    return specsegments
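
A hypothetical call, only to show how the returned list of dictionaries might be used; the FITS file names, aperture rows, and aperture name below are placeholders, not files from the project:

segments = readspec('IRAS05358_slit.fits', 'IRAS05358_noise.fits',
                    aperture=[12, 18], apname='ap1')
for seg in segments:
    print seg['name'], seg['linewl'], seg['wavelength'].shape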
Code Example #4
def planetflux(planet, date):
    if planet == 'mars':
        mars = readcol('/Users/adam/work/bgps_pipeline/calibration/MARS.DAT',
                       asStruct=True)
        closest = numpy.argmin(numpy.abs(date - mars.MJD))
        return mars.fluxbeam[closest]
    elif planet == 'uranus':
        uranus = readcol(
            '/Users/adam/work/bgps_pipeline/calibration/URANUS.DAT',
            asStruct=True)
        closest = numpy.argmin(numpy.abs(date - uranus.MJD))
        return uranus.fluxbeam[closest]
    elif planet == 'neptune':
        neptune = readcol(
            '/Users/adam/work/bgps_pipeline/calibration/NEPTUNE.DAT',
            asStruct=True)
        closest = numpy.argmin(numpy.abs(date - neptune.MJD))
        return neptune.fluxbeam[closest]
    elif planet == 'sgrb2':
        return 100.0
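
For example, a calibration lookup near an arbitrary date might look like the sketch below; the MJD is made up and the .DAT paths are the hard-coded ones above:

S_mars = planetflux('mars', 55200.0)   # nearest-MJD entry from MARS.DAT (units as tabulated)
S_ref = planetflux('sgrb2', 55200.0)   # the fixed 100.0 returned for 'sgrb2' above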
Code Example #5
def readspexspec(
        image,
        linelistfile='/Users/adam/work/IRAS05358/code/linelist.txt',
        path_obs='',  #'/Users/adam/observations/IRAS05358/UT090108/',
        nameregex='2-1 S\(1\)|1-0 S\([1379028]\)|1-0 Q\([1234]\)|3-2 S\([35]\)|4-3 S\(5\)',
        vlsrcorr=0,
        backsub=False,
        **kwargs):

    regex = re.compile(nameregex)
    im = pyfits.open(path_obs + image)
    wlA = im[0].data[0, :] * 1e4
    data = im[0].data[1, :]

    countsperflux = 2.25e18
    errspec = im[0].data[2, :]

    lines = readcol(linelistfile, fsep='|', twod=False)
    lines[1] = asarray(lines[1],
                       dtype='float') * 1e4  # convert microns to angstroms

    specsegments = []
    for line in transpose(lines):
        wl = float(line[1])
        name = line[0]
        if wl > wlA.min() and wl < wlA.max() and regex.search(name) != None:
            closest = argmin(abs(wlA - wl))
            minind = closest - 7
            maxind = closest + 7

            savedata = data[minind:maxind]
            if backsub:
                savedata -= median(savedata[savedata < median(savedata)])
            specsegments.append({
                'name': name,
                'linewl': wl,
                'index': closest,
                'minind': minind,
                'maxind': maxind,
                'vlsrcorr': vlsrcorr,
                'wavelength': wlA[minind:maxind],
                'data': savedata,
                'noback': data[minind:maxind] * 0,
                'err': errspec[minind:maxind],
                'model': data[minind:maxind] * 0
            })

    print "Done finding lines"

    return specsegments
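
A hedged usage sketch: the SpeX FITS file name and velocity correction are placeholders, and the file is assumed to store wavelength, flux, and error as the first three rows of the primary HDU, which is what the indexing above expects.

segs = readspexspec('IRAS05358_spex_merged.fits', backsub=True, vlsrcorr=-17.0)
linenames = [seg['name'] for seg in segs]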
Code Example #6
def cenfile_overlays(cenfile):
    cen = readcol(cenfile, comment="#", asStruct=True)

    for name, ra, dec, obra, obdec in zip(cen.filename, cen.radeg, cen.decdeg,
                                          cen.obj_ra, cen.obj_dec):
        print name
        pos = coords.Position((ra, dec))
        xc, yc = pos.galactic()
        objpos = coords.Position((obra, obdec))
        xtr, ytr = objpos.galactic()

        make_overlay(name.tolist(), xc, yc, xtr, ytr)
        pylab.figure(0)
        pylab.clf()
Code Example #7
File: h2fit.py  Project: Fade89/agpy
def readspexspec(image,
    linelistfile = '/Users/adam/work/IRAS05358/code/linelist.txt',
    path_obs='', #'/Users/adam/observations/IRAS05358/UT090108/',
    nameregex='2-1 S\(1\)|1-0 S\([1379028]\)|1-0 Q\([1234]\)|3-2 S\([35]\)|4-3 S\(5\)',
    vlsrcorr=0,
    backsub=False,
    **kwargs):

    regex = re.compile(nameregex)
    im = pyfits.open(path_obs+image)
    wlA = im[0].data[0,:] * 1e4
    data = im[0].data[1,:]

    countsperflux = 2.25e18
    errspec = im[0].data[2,:]

    lines = readcol(linelistfile,fsep='|',twod=False)
    lines[1] = asarray(lines[1],dtype='float')*1e4 # convert microns to angstroms

    specsegments=[]
    for line in transpose(lines):
        wl = float(line[1])
        name = line[0]
        if wl > wlA.min() and wl < wlA.max() and regex.search(name) != None:
            closest = argmin(abs(wlA-wl))
            minind = closest-7
            maxind = closest+7

            savedata = data[minind:maxind]
            if backsub:
                savedata -= median(savedata[savedata<median(savedata)])
            specsegments.append({
                'name':name,
                'linewl':wl,
                'index':closest,
                'minind':minind,
                'maxind':maxind,
                'vlsrcorr':vlsrcorr,
                'wavelength':wlA[minind:maxind],
                'data':savedata,
                'noback':data[minind:maxind]*0,
                'err':errspec[minind:maxind],
                'model':data[minind:maxind]*0
                })

    print "Done finding lines"            

    return specsegments
Code Example #8
File: h2fit.py  Project: Fade89/agpy
    http://webbook.nist.gov/cgi/cbook.cgi?ID=C1333740&Units=SI&Mask=1000#Diatomic
    (see the bottom of the table)
    """

    We=4401.21
    Be=60.853
    WeXe=121.33 
    De=.0471
    Ae=3.062
    re=.74144

    return h * c * (We*(V+0.5) + Be*(J*(J+1)) - WeXe*(V+.5)**2 - De*J**2*(J+1)**2 - Ae*(V+.5)*(J+1)*J)

try:
    # read in rest energies before calling function
    resten = readcol(tablepath+'dalgarno1984_table5.txt',verbose=0)

    def restwl(vu,vl,ju,jl,calc=False):
        """ Uses energy levels measured by Dabrowski & Herzberg, Can J. Physics, 62,1639,1984 
        vu,vl - upper and lower vibrational states
        ju,jl - upper and lower rotational states 
        returns wavelength in microns
        online versions of this table:
        http://www.astronomy.ohio-state.edu/~depoy/research/observing/molhyd.htm
        http://www.jach.hawaii.edu/UKIRT/astronomy/calib/spec_cal/h2_s.html
        """
        if calc:
            return 1e4*h*c / (h2level_energy(vu,ju) - h2level_energy(vl,jl))
        else:
            if ju >= resten.shape[0] or vu >= resten.shape[1]:
                return 0
Code Example #9
"""
Find the closest magpis GPS point
"""
import numpy as np

def nearest_source(x1,y1,x2,y2):
    d = np.sqrt((x1-x2)**2 + (y1-y2)**2)
    return d.argmin(),d.min()

import atpy
from agpy import readcol

bgps = atpy.Table('bgps_iras_langston_scuba_match.tbl',type='ascii')
magpis = readcol('/Users/adam/work/catalogs/merge6_20.cat',fixedformat=[ 7,  7, 13, 13,  6,  9, 10,  8,  7,  7,  6,  5,  9, 10,  8,  9,  2],asStruct=True,skipline=10,nullval='          ')


dbest,bestmatch = np.zeros(len(bgps)),np.zeros(len(bgps))
for ii in xrange(len(bgps)):
    bestmatch[ii],dbest[ii] = nearest_source(bgps.glon_peak[ii],bgps.glat_peak[ii],magpis.Long,magpis.Lat)

bgps.add_column('Fint6',magpis.Fint6[bestmatch.astype('int')],unit="Jy/beam")
bgps.add_column('Fpeak6',magpis.Fpeak6[bestmatch.astype('int')],unit="Jy")
bgps.add_column('Fint20',magpis.Fint20[bestmatch.astype('int')],unit="Jy/beam")
bgps.add_column('Fpeak20',magpis.Fpeak20[bestmatch.astype('int')],unit="Jy")
bgps.add_column('BGPS-magpisdist',dbest,unit='degrees')

bgps.write("/Users/adam/work/catalogs/bgps_iras_langston_scuba_magpis_match.tbl",overwrite=True)
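
A toy check of nearest_source with made-up coordinates (flat-sky distances, exactly as the function above computes them):

x1, y1 = 30.75, -0.05                   # one (glon, glat) point, degrees
x2 = np.array([30.70, 30.76, 31.00])    # candidate source longitudes
y2 = np.array([-0.10, -0.04, 0.20])     # candidate source latitudes
idx, dist = nearest_source(x1, y1, x2, y2)
# idx == 1; dist == sqrt(0.01**2 + 0.01**2), about 0.014 degrees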
Code Example #10
# from astropy.io import fits
import os
import re
from agpy import readcol
from astropy import log
from astropy import units as u

from sdpy import makecube

bsgs = readcol('arecibo_bsg_freqref.txt',skipafter=1,asRecArray=True)

for line in bsgs:
    bsg = line['bsg']
    linefreq = line['restfreq']*u.MHz
    linename_ha = linename = line['linename']
    if linefreq == 0:
        continue
        
    # 7/29/2014: remove non-superresolution maps
    # 1/10/2014: add "superresolution" maps
    vmin = 30
    vmax = 90
    velocityrange = [-50,150]

    #makecube.generate_header(49.523158,-0.34987466,naxis1=96,naxis2=96,pixsize=20,naxis3=1600,cd3=0.5,clobber=True,restfreq=4.8296594e9)
    #makecube.generate_header(49.353568,-0.2982199,naxis1=144,naxis2=96,pixsize=20,naxis3=1600,cd3=0.5,clobber=True,restfreq=4.8296594e9)
    # reduced to CD3 = 1.0, naxis3 = 350 because of size and because the arecibo spectra looked artificially smoothed
    cd3 = 1.0
    crval3 = 50.0
    naxis3 = int((velocityrange[1]-velocityrange[0]) / cd3)
    makecube.generate_header(49.209553,-0.277137,naxis1=308,naxis2=205,pixsize=15,naxis3=int(naxis3),cd3=cd3,crval3=crval3,clobber=True,
Code Example #11
def plot_radex(filename,
               ngridpts=100,
               ncontours=50,
               plottype='ratio',
               transition="noname",
               thirdvarname="Temperature",
               cutnumber=None,
               cutvalue=10,
               vmin=None,
               vmax=None,
               logscale=False,
               save=True,
               **kwargs):
    """
    Create contour plots in density/column, density/temperature, or column/temperature
    filename - Name of the .dat file generated by radex_grid.py
    ngridpts - number of points in grid to interpolate onto
    ncontours - number of contours / colors
    plottype - can be 'ratio','tau1','tau2','tex1','tex2'
    transition - The name of the transition, e.g. "1-1_2-2".  Only used for saving
    thirdvarname - Third variable, i.e. the one that will be cut through.  If you want
        a density/column plot, choose temperature
    cutnumber - Cut on the cutnumber value of thirdvar.  e.g., if there are 5 temperatures 
        [10,20,30,40,50] and you set cutnumber=3, a temperature of 40K will be used
    cutvalue - Cut on this value; procedure will fail if there are no columns with this value
    vmin - Can force vmin/vmax in plotting procedures
    vmax - Can force vmin/vmax in plotting procedures
    logscale - takes log10 of plotted value before contouring 
    save - save the figure as a png?
    """

    names, props = readcol(filename, twod=False, names=True)
    temperature, density, column, tex1, tex2, tau1, tau2, tline1, tline2, flux1, flux2 = props
    ratio = flux1 / flux2

    if thirdvarname == "Temperature":
        firstvar = density
        secondvar = column
        thirdvar = temperature
        if cutnumber is not None:
            cutvalue = unique(thirdvar)[int(cutnumber)]
        firstlabel = "log$(n_{H_2}) ($cm$^{-3})$"
        secondlabel = "log$(N_{H_2CO}) ($cm$^{-2})$"
        savetype = "DenCol_T=%iK" % cutvalue
        graphtitle = "T = %g K" % cutvalue
        firstvar = temperature
        secondvar = column
        thirdvar = density
        if cutnumber is not None:
            cutvalue = unique(thirdvar)[int(cutnumber)]
        firstlabel = "Temperature (K)"
        secondlabel = "log$(N_{H_2CO}) ($cm$^{-2})$"
        savetype = "TemCol_n=1e%gpercc" % cutvalue
        graphtitle = "n = %g cm$^{-3}$" % (10**cutvalue)
    elif thirdvarname == "Column":
        secondvar = density
        firstvar = temperature
        thirdvar = column
        if cutnumber is not None:
            cutvalue = unique(thirdvar)[int(cutnumber)]
        secondlabel = "log$(n_{H_2}) ($cm$^{-3})$"
        firstlabel = "Temperature (K)"
        savetype = "TemDen_N=1e%gpersc" % cutvalue
        graphtitle = "N = %g cm$^{-2}$" % (10**cutvalue)

    if plottype == 'ratio':
        cblabel = "$F_{1-1} / F_{2-2}$"
    elif plottype == 'tau1':
        cblabel = "$\\tau_{1-1}$"
    elif plottype == 'tau2':
        cblabel = "$\\tau_{2-2}$"
    elif plottype == 'tex1':
        cblabel = "$T_{ex}(1-1)$"
    elif plottype == 'tex2':
        cblabel = "$T_{ex}(2-2)$"

    varfilter = (thirdvar == cutvalue)
    if varfilter.sum() == 0:
        raise ValueError("Cut value %g does not match any of %s values" %
                         (cutvalue, thirdvarname))

    nx = len(unique(firstvar))
    ny = len(unique(secondvar))
    if firstvar is temperature:
        firstarr = linspace((firstvar.min()), (firstvar.max()), nx)
    else:
        firstarr = linspace(firstvar.min(), firstvar.max(), nx)
    secondarr = linspace(secondvar.min(), secondvar.max(), ny)

    exec('plotdata = %s' % plottype)

    plot_grid = griddata(firstvar[varfilter], secondvar[varfilter],
                         plotdata[varfilter], firstarr, secondarr)

    if vmax:
        plot_grid[plot_grid > vmax] = vmax
    if vmin:
        plot_grid[plot_grid < vmin] = vmin
    if logscale:
        plot_grid = log10(plot_grid)

    figure(1)
    clf()
    conlevs = logspace(-3, 1, ncontours)
    contourf(firstarr,
             secondarr,
             plot_grid,
             conlevs,
             norm=matplotlib.colors.LogNorm()
             )  #,**kwargs) #,norm=asinh_norm.AsinhNorm(**kwargs),**kwargs)
    xlabel(firstlabel)
    ylabel(secondlabel)
    title(graphtitle)
    cb = colorbar()
    cb.set_label(cblabel)
    cb.set_ticks([1e-3, 1e-2, 1e-1, 1, 1e1])
    cb.set_ticklabels([1e-3, 1e-2, 1e-1, 1, 1e1])
    if save: savefig("%s_%s_%s.png" % (savetype, plottype, transition))
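
Called on a radex_grid.py output file, a density/column ratio plot at a fixed temperature might be requested as below; the .dat file name is a placeholder and 20 is assumed to be one of the grid temperatures:

plot_radex('h2co_grid.dat', plottype='ratio', transition='1-1_2-2',
           thirdvarname='Temperature', cutvalue=20)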
Code Example #12
from agpy import plfit
from agpy import readcol

if not globals().has_key('dotests'): dotests = raw_input('Do tests? ') not in ['','n','N']

rcParams['font.size'] = 36

bcnames,bgps = readcol('/Users/adam/work/catalogs/bolocam_gps_v1_0.tbl',names=True,skipline=45)
bgpsd = readcol('/Users/adam/work/catalogs/bolocam_gps_v1_0.tbl',names=True,skipline=45,asdict=True)

bgflux = bgps[:,18].astype('float')
bgflux40 = bgps[:,12].astype('float')
bgrad = bgps[:,11]
bgrad = bgrad[bgrad!='null'].astype('float')

print "plBGFLUX"
plBGFLUX = plfit(bgflux,nosmall=True)
if dotests: plBGFLUXp,plBGFLUXksarr = plBGFLUX.test_pl()
# xmin: 4.877  n(>xmin): 369  alpha: 2.60503 +/- 0.0835548  Likelihood: -1009  ks: 0.943031
# p(2500) = .52
figure(1); clf()
xlabel('Flux Density (Jy)'); ylabel('N(S)')
plBGFLUX.plotpdf(nbins=40,dolog=True)
savefig('/Users/adam/work/massfunc/BGPS_bolocat_flux_Nhist.png',papertype='b3')
figure(2); clf();
xlabel('Flux Density (Jy)'); ylabel('$\Delta$N/$\Delta$S')
plBGFLUX.plotpdf(nbins=40,dolog=True,dnds=True)
savefig('/Users/adam/work/massfunc/BGPS_bolocat_flux_dndshist.png',papertype='b3')
clf(); plBGFLUX.alphavsks()
savefig('/Users/adam/work/massfunc/BGPS_bolocat_flux_alphaks.png',papertype='b3')
Code Example #13
#import cplfit
import plfit
import time
from numpy.random import rand,seed
from numpy import unique,sort,array,asarray,log,sum,min,max,argmin,argmax,arange
import sys
import powerlaw
from agpy import readcol

try:
    ne = int(sys.argv[1])
    seed(1)
    X=plfit.plexp_inv(rand(ne),1,2.5)
    X[:100] = X[100:200]
except ValueError:
    X = readcol(sys.argv[1])

if len(sys.argv)>2:
    discrete = bool(sys.argv[2])
else:
    discrete=None

print "Cython"
t1=time.time(); p3=plfit.plfit(X,discrete=discrete,usefortran=False,usecy=True); print time.time()-t1
print "Fortran"
t1=time.time(); p1=plfit.plfit(X,discrete=discrete,usefortran=True); print time.time()-t1
print "Numpy"
t1=time.time(); p3=plfit.plfit(X,discrete=discrete,usefortran=False); print time.time()-t1

print "Jeff Alcott's Powerlaw"
t5=time.time(); p5=powerlaw.Fit(X,discrete=discrete); print time.time()-t5
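
For reference, the benchmark is driven entirely from the command line; two hedged invocations (the data file name is a placeholder):

#   python speedcompare_plfit.py 10000          # fit 10000 synthetic plexp_inv draws
#   python speedcompare_plfit.py mydata.txt 1   # read X from a one-column text file, with discrete=True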
Code Example #14
    We = 4401.21
    Be = 60.853
    WeXe = 121.33
    De = .0471
    Ae = 3.062
    re = .74144

    return h * c * (We * (V + 0.5) + Be * (J * (J + 1)) - WeXe *
                    (V + .5)**2 - De * J**2 * (J + 1)**2 - Ae * (V + .5) *
                    (J + 1) * J)


try:
    # read in rest energies before calling function
    resten = readcol(tablepath + 'dalgarno1984_table5.txt', verbose=0)

    def restwl(vu, vl, ju, jl, calc=False):
        """ Uses energy levels measured by Dabrowski & Herzberg, Can J. Physics, 62,1639,1984 
        vu,vl - upper and lower vibrational states
        ju,jl - upper and lower rotational states 
        returns wavelength in microns
        online versions of this table:
        http://www.astronomy.ohio-state.edu/~depoy/research/observing/molhyd.htm
        http://www.jach.hawaii.edu/UKIRT/astronomy/calib/spec_cal/h2_s.html
        """
        if calc:
            return 1e4 * h * c / (h2level_energy(vu, ju) -
                                  h2level_energy(vl, jl))
        else:
            if ju >= resten.shape[0] or vu >= resten.shape[1]:
Code Example #15
File: plot_grids.py  Project: Fade89/agpy
def gridcube(filename,outfilename,var1="density",var2="column",var3="temperature",var4=None,plotvar="tau1",
        zerobads=True):
    """
    Reads in a radex_grid.py generated .dat file and turns it into a .fits data cube.
    filename - input .dat filename
    outfilename - output data cube name
    var1/var2/var3 - which variable will be used along the x/y/z axis?
    plotvar - which variable will be the value in the data cube?
    zerobads - set inf/nan values in plotvar to be zero
    """

    names,props = readcol(filename,twod=False,names=True)
    if var4 is None:
        temperature,density,column,tex1,tex2,tau1,tau2,tline1,tline2,flux1,flux2 = props
    else:
        temperature,density,column,opr,tex1,tex2,tau1,tau2,tline1,tline2,flux1,flux2 = props
        opr = numpy.floor(opr*100)/100.
    ratio = tau1 / tau2

    vardict = {
      "temperature":temperature,
      "density":density,
      "column":column,
      "tex1":tex1,
      "tex2":tex2,
      "tau1":tau1,
      "tau2":tau2,
      "tline1":tline1,
      "tline2":tline2,
      "flux1":flux1,
      "flux2":flux2,
      "ratio":ratio,
      "opr":opr,
      }

    xarr = (unique(vardict[var1])) #linspace(vardict[var1].min(),vardict[var1].max(),nx)
    yarr = (unique(vardict[var2])) #linspace(vardict[var2].min(),vardict[var2].max(),ny)
    zarr = (unique(vardict[var3])) #linspace(vardict[var2].min(),vardict[var2].max(),ny)

    nx = len(xarr)
    ny = len(yarr)
    nz = len(zarr)
    if var4 is not None:
        warr = (unique(vardict[var4])) #linspace(vardict[var2].min(),vardict[var2].max(),ny)
        nw = len(warr)
        newarr = zeros([nw,nz,ny,nx])
        if nw != 11:
            import pdb; pdb.set_trace()
    else:
        newarr = zeros([nz,ny,nx])

    print "Cube shape will be ",newarr.shape

    if zerobads:
        pv = vardict[plotvar]
        pv[pv!=pv] = 0.0
        pv[isinf(pv)] = 0.0

    if var4 is None:
        for ival,val in enumerate(unique(vardict[var3])):
          varfilter = vardict[var3]==val
          newarr[ival,:,:] = griddata((vardict[var1][varfilter]),(vardict[var2][varfilter]),vardict[plotvar][varfilter],xarr,yarr)
    else:
        for ival4,val4 in enumerate(warr):
            for ival3,val3 in enumerate(zarr):
              varfilter = (vardict[var3]==val3) * (vardict[var4]==val4)
              newarr[ival4,ival3,:,:] = griddata((vardict[var1][varfilter]),(vardict[var2][varfilter]),vardict[plotvar][varfilter],xarr,yarr)

    newfile = pyfits.PrimaryHDU(newarr)
    if var4 is not None:
        newfile.header.update('CRVAL4' ,  (min(warr)) )
        newfile.header.update('CRPIX4' ,  1 )
        newfile.header.update('CTYPE4' ,  'LOG--OPR' )
        newfile.header.update('CDELT4' , ((warr)[1]) - ((warr)[0]) )
    newfile.header.update('BTYPE' ,  plotvar )
    newfile.header.update('CRVAL3' ,  (min(zarr)) )
    newfile.header.update('CRPIX3' ,  1 )
    newfile.header.update('CTYPE3' ,  'LIN-TEMP' )
    newfile.header.update('CDELT3' , ((zarr)[1]) - ((zarr)[0]) )
    newfile.header.update('CRVAL1' ,  min(xarr) )
    newfile.header.update('CRPIX1' ,  1 )
    newfile.header.update('CD1_1' , xarr[1]-xarr[0] )
    newfile.header.update('CTYPE1' ,  'LOG-DENS' )
    newfile.header.update('CRVAL2' ,  min(yarr) )
    newfile.header.update('CRPIX2' ,  1 )
    newfile.header.update('CD2_2' , yarr[1]-yarr[0] )
    newfile.header.update('CTYPE2' ,  'LOG-COLU' )
    newfile.writeto(outfilename,clobber=True)
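
A sketch of a call for the three-dimensional case; both file names are placeholders, and the column order in the .dat file must match the unpacking above:

gridcube('h2co_grid.dat', 'h2co_tau1_cube.fits',
         var1='density', var2='column', var3='temperature', plotvar='tau1')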
Code Example #16
        # removed 'uranus_091210_ob5-6_mask_v2.0_0pca_coalign_map10.fits',
        # removed 'uranus_091210_ob7-8_mask_v2.0_0pca_coalign_map10.fits',
        'uranus_091216_ob8-9_mask_v2.0_0pca_coalign_map10.fits',
        'uranus_091219_o15-6_mask_v2.0_0pca_coalign_map10.fits',
        'uranus_091220_ob1-2_mask_v2.0_0pca_coalign_map10.fits',
        'uranus_091224_o10-9_mask_v2.0_0pca_coalign_map10.fits',
    ]
    if sample == 'notmars':
        #filelist = [ a for a in filelist  if a.find('mars')==-1 ]
        #filelist = filelist[:4]
        imstack = pylab.zeros([300, 300, len(filelist)])
        xx, yy = pylab.indices([300, 300])
elif sample == 'dec2011':
    filelist = [
        a for b in readcol(
            '/Users/adam/work/bgps_pipeline/plotting/lists/pointsource_ds2_13pca_default.list'
        ) for a in b
    ]
elif sample == 'dec2011notmars':
    filelist = [
        a for b in readcol(
            '/Users/adam/work/bgps_pipeline/plotting/lists/pointsource_ds2_13pca_default.list'
        ) for a in b if 'mars' not in a
    ]
    marslist = [
        a for b in readcol(
            '/Users/adam/work/bgps_pipeline/plotting/lists/pointsource_ds2_13pca_default.list'
        ) for a in b if 'mars' in a
    ]
    imstack = pylab.zeros([300, 300, len(filelist)])
    marsstack = pylab.zeros([300, 300, len(marslist)])
Code Example #17
def gridcube(filename,
             outfilename,
             var1="density",
             var2="column",
             var3="temperature",
             plotvar="tau1",
             zerobads=True):
    """
    Reads in a radex_grid.py generated .dat file and turns it into a .fits data cube.
    filename - input .dat filename
    outfilename - output data cube name
    var1/var2/var3 - which variable will be used along the x/y/z axis?
    plotvar - which variable will be the value in the data cube?
    zerobads - set inf/nan values in plotvar to be zero
    """

    names, props = readcol(filename, twod=False, names=True)
    temperature, density, column, tex1, tex2, tau1, tau2, tline1, tline2, flux1, flux2 = props
    ratio = tau1 / tau2

    vardict = {
        "temperature": temperature,
        "density": density,
        "column": column,
        "tex1": tex1,
        "tex2": tex2,
        "tau1": tau1,
        "tau2": tau2,
        "tline1": tline1,
        "tline2": tline2,
        "flux1": flux1,
        "flux2": flux2,
        "ratio": ratio,
    }

    nx = len(unique(vardict[var1]))
    ny = len(unique(vardict[var2]))
    nz = len(unique(vardict[var3]))

    xarr = (unique(vardict[var1])
            )  #linspace(vardict[var1].min(),vardict[var1].max(),nx)
    yarr = (unique(vardict[var2])
            )  #linspace(vardict[var2].min(),vardict[var2].max(),ny)

    newarr = zeros([nz, ny, nx])

    if zerobads:
        pv = vardict[plotvar]
        pv[pv != pv] = 0.0
        pv[isinf(pv)] = 0.0

    for ival, val in enumerate(unique(vardict[var3])):
        varfilter = vardict[var3] == val
        newarr[ival, :, :] = griddata((vardict[var1][varfilter]),
                                      (vardict[var2][varfilter]),
                                      vardict[plotvar][varfilter], xarr, yarr)

    newfile = pyfits.PrimaryHDU(newarr)
    newfile.header.update('BTYPE', plotvar)
    newfile.header.update('CRVAL3', (min(temperature)))
    newfile.header.update('CRPIX3', 1)
    newfile.header.update('CTYPE3', 'LIN-TEMP')
    newfile.header.update('CD3_3',
                          (unique(temperature)[1]) - (unique(temperature)[0]))
    newfile.header.update('CRVAL1', min(xarr))
    newfile.header.update('CRPIX1', 1)
    newfile.header.update('CD1_1', xarr[1] - xarr[0])
    newfile.header.update('CTYPE1', 'LOG-DENS')
    newfile.header.update('CRVAL2', min(yarr))
    newfile.header.update('CRPIX2', 1)
    newfile.header.update('CD2_2', yarr[1] - yarr[0])
    newfile.header.update('CTYPE2', 'LOG-COLU')
    newfile.writeto(outfilename, clobber=True)
Code Example #18
File: clauset2009_tests.py  Project: hedgeon/plfit
# coding: utf-8
from agpy import readcol
import plfit
from pylab import *

blackouts = readcol('blackouts.txt')
cities = readcol('cities.txt')
earthquakes = readcol('earthquakes.txt')
melville = readcol('melville.txt')
solarflares = readcol('solarflares.txt')
terrorism = readcol('terrorism.txt')

#print "quakes 0.00 -7.14 0.00 11.6 0.00 -7.09 0.00 -24.4 0.00 with cut-off"
#earthquakeP = plfit.plfit(earthquakes)


pl = plfit.plfit(cities.ravel() / 1e3, usefortran=True, verbose=True)
print "Cities (me)     : n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (pl.data.shape[0], pl.data.mean(), pl.data.std(), pl.data.max(), pl._xmin, pl._alpha, pl._alphaerr, pl._ngtx, pl._ks_prob)
print "Cities (Clauset): n:%10i mean,std,max: %8.2f,%8.2f,%8.2f xmin: %8.2f alpha: %8.2f (%8.2f) ntail: %10i p: %5.2f" % (19447,9.00,77.83,8009,52.46,2.37,0.08,580,0.76)
figure(1)
clf()
title("Cities")
subplot(131)
pl.plotpdf()
subplot(132)
title("Cities")
pl.xminvsks()
subplot(133)
pl.alphavsks()
savefig("figures/cities_kstests.png")
Code Example #19
def gridcube(filename,outfilename,var1="density",var2="column",var3="temperature",plotvar="tau1",
        zerobads=True):
    """
    Reads in a radex_grid.py generated .dat file and turns it into a .fits data cube.
    filename - input .dat filename
    outfilename - output data cube name
    var1/var2/var3 - which variable will be used along the x/y/z axis?
    plotvar - which variable will be the value in the data cube?
    zerobads - set inf/nan values in plotvar to be zero
    """

    names,props = readcol(filename,twod=False,names=True)
    temperature,density,column,tex1,tex2,tau1,tau2,tline1,tline2,flux1,flux2 = props
    ratio = tau1 / tau2

    vardict = {
      "temperature":temperature,
      "density":density,
      "column":column,
      "tex1":tex1,
      "tex2":tex2,
      "tau1":tau1,
      "tau2":tau2,
      "tline1":tline1,
      "tline2":tline2,
      "flux1":flux1,
      "flux2":flux2,
      "ratio":ratio,
      }

    nx = len(unique(vardict[var1]))
    ny = len(unique(vardict[var2]))
    nz = len(unique(vardict[var3]))

    xarr = (unique(vardict[var1])) #linspace(vardict[var1].min(),vardict[var1].max(),nx)
    yarr = (unique(vardict[var2])) #linspace(vardict[var2].min(),vardict[var2].max(),ny)

    newarr = zeros([nz,ny,nx])

    if zerobads:
        pv = vardict[plotvar]
        pv[pv!=pv] = 0.0
        pv[isinf(pv)] = 0.0

    for ival,val in enumerate(unique(vardict[var3])):
      varfilter = vardict[var3]==val
      newarr[ival,:,:] = griddata((vardict[var1][varfilter]),(vardict[var2][varfilter]),vardict[plotvar][varfilter],xarr,yarr)

    newfile = pyfits.PrimaryHDU(newarr)
    newfile.header.update('BTYPE' ,  plotvar )
    newfile.header.update('CRVAL3' ,  (min(temperature)) )
    newfile.header.update('CRPIX3' ,  1 )
    newfile.header.update('CTYPE3' ,  'LIN-TEMP' )
    newfile.header.update('CD3_3' , (unique(temperature)[1]) - (unique(temperature)[0]) )
    newfile.header.update('CRVAL1' ,  min(xarr) )
    newfile.header.update('CRPIX1' ,  1 )
    newfile.header.update('CD1_1' , xarr[1]-xarr[0] )
    newfile.header.update('CTYPE1' ,  'LOG-DENS' )
    newfile.header.update('CRVAL2' ,  min(yarr) )
    newfile.header.update('CRPIX2' ,  1 )
    newfile.header.update('CD2_2' , yarr[1]-yarr[0] )
    newfile.header.update('CTYPE2' ,  'LOG-COLU' )
    newfile.writeto(outfilename,clobber=True)
Code Example #20
from agpy import plfit
from agpy import readcol

if not globals().has_key('dotests'):
    dotests = raw_input('Do tests? ') not in ['', 'n', 'N']

rcParams['font.size'] = 36

bcnames, bgps = readcol('/Users/adam/work/catalogs/bolocam_gps_v1_0.tbl',
                        names=True,
                        skipline=45)
bgpsd = readcol('/Users/adam/work/catalogs/bolocam_gps_v1_0.tbl',
                names=True,
                skipline=45,
                asdict=True)

bgflux = bgps[:, 18].astype('float')
bgflux40 = bgps[:, 12].astype('float')
bgrad = bgps[:, 11]
bgrad = bgrad[bgrad != 'null'].astype('float')

print "plBGFLUX"
plBGFLUX = plfit(bgflux, nosmall=True)
if dotests: plBGFLUXp, plBGFLUXksarr = plBGFLUX.test_pl()
# xmin: 4.877  n(>xmin): 369  alpha: 2.60503 +/- 0.0835548  Likelihood: -1009  ks: 0.943031
# p(2500) = .52
figure(1)
clf()
xlabel('Flux Density (Jy)')
ylabel('N(S)')
plBGFLUX.plotpdf(nbins=40, dolog=True)
Code Example #21
File: coefficient_calculations.py  Project: wiai/agpy
#!/usr/bin/env python
# data files come from ftp://cdsarc.u-strasbg.fr/pub/cats/J/A+A/493/687/
from agpy import readcol
import numpy

h2co_oo = readcol('h2co_oo.dat', asStruct=True, comment='%', skipline=3)
h2co_op = readcol('h2co_op.dat', asStruct=True, comment='%', skipline=3)
levels = readcol('o-h2co_levels.dat', asStruct=True)

nlevelso = len(h2co_oo.Jl)
h2co_oo.__dict__['llevelnum'] = numpy.zeros(nlevelso)
h2co_oo.__dict__['ulevelnum'] = numpy.zeros(nlevelso)
nlevelsp = len(h2co_op.Jl)
h2co_op.__dict__['llevelnum'] = numpy.zeros(nlevelsp)
h2co_op.__dict__['ulevelnum'] = numpy.zeros(nlevelsp)

temperatures = numpy.arange(5, 105, 5)


def R(a0, a1, a2, a3, a4, T):
    """
    Troscompt et al (2009) coefficients using Faure et al (2004) equation:
    log10(R) = sum(a_n T^{-n/6})
    where n=0..4, R is presumably cm^3 s^-1
    """
    return a0 + a1 * T**(-1. / 6.) + a2 * T**(-2. / 6.) + a3 * T**(
        -3. / 6.) + a4 * T**(-4. / 6.)


outf = open('o-h2co_troscompt.dat', 'w')
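
As a quick illustration of the fitting formula, R can be evaluated over the temperature grid defined above; the coefficients below are placeholders, not values from the Troscompt et al. tables:

a0, a1, a2, a3, a4 = -10.2, 1.5, -0.8, 0.3, -0.1   # made-up coefficients
log10_rate = R(a0, a1, a2, a3, a4, temperatures)   # log10(R) at T = 5..100 K
rate = 10**log10_rate                              # cm^3 s^-1, per the docstring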
Code Example #22
    rcParams['font.family'] = 'serif'
    rcParams['font.serif'][0] = 'Times New Roman'
    rcParams['font.size'] = 16.0
    prefix = '/Volumes/disk2/data/bgps/releases/v1.0/v1.0.2/'
    #mosaic = prefix+'MOSAIC.fits'
    mosaic = '/Volumes/disk2/data/bgps/releases/IPAC/MOSAIC_snmap.fits'
    outf = prefix + 'fillfact_v1.0.2.txt'
    #fillingfactorplot(mosaic,outf,cutoff=[0.1,0.3,0.5],binsize=[0.1,0.5,1.0],lmin=-10.5,lmax=90.5)
    fillingfactorplot(mosaic,
                      outf,
                      cutoff=[3, 9, 12],
                      binsize=[0.1, 0.5, 1.0],
                      lmin=-10.5,
                      lmax=90.5)
    from agpy import readcol
    binpt1 = readcol(prefix + 'fillfact_v1.0.2_binsize0.10.txt', asStruct=True)
    binpt5 = readcol(prefix + 'fillfact_v1.0.2_binsize0.50.txt', asStruct=True)
    bin1 = readcol(prefix + 'fillfact_v1.0.2_binsize1.00.txt', asStruct=True)

    comosaic = '/Volumes/disk2/data/co/Wco_DHT2001.fits'
    cooutf = prefix + 'cofillfact_v1.0.2.txt'
    fillingfactorplot(comosaic,
                      cooutf,
                      cutoff=[50, 100, 150],
                      binsize=[0.1, 0.5, 1.0],
                      lmin=-10.5,
                      lmax=90.5)
    cobinpt1 = readcol(prefix + 'cofillfact_v1.0.2_binsize0.10.txt',
                       asStruct=True)
    cobinpt5 = readcol(prefix + 'cofillfact_v1.0.2_binsize0.50.txt',
                       asStruct=True)
Code Example #23
def readspec(
        image,
        noiseimage,
        linelistfile='/Users/adam/work/IRAS05358/code/linelist.txt',
        path_obs='',  #'/Users/adam/observations/IRAS05358/UT090108/',
        noiseaperture=[0, 10],
        aperture=[],
        nameregex='2-1 S\(1\)|1-0 S\([1379028]\)|1-0 Q\([1234]\)|3-2 S\([35]\)|4-3 S\(5\)',
        apname='',
        vlsrcorr=0):

    regex = re.compile(nameregex)
    im = pyfits.open(path_obs + image)
    noiseim = pyfits.open(path_obs + noiseimage)
    wlA = im[0].header['CRVAL1'] + im[0].header['CD1_1'] * (
        arange(im[0].data.shape[1]) - im[0].header['CRPIX1'] + 1)
    data = im[0].data
    atmospec = median(noiseim[0].data[noiseaperture[0]:noiseaperture[1]],
                      axis=0)

    countsperflux = 2.25e18
    stdatmo = noiseim[0].data[noiseaperture[0]:noiseaperture[1]].std(
        axis=0)  # std. dev. of non-backsubtracted data
    poisserr = sqrt(
        abs(noiseim[0].data).mean(axis=0) * countsperflux
    ) / countsperflux  # Poisson noise (very approximate correction)
    errspec = sqrt(
        stdatmo**2 + 2 * poisserr**2
    )  # poisson statistics - once for estimation of the noise, once for the subtraction
    errspec /= atmotrans_vect(
        wlA)**2  # Weight by inverse of atmospheric transmission^2

    lines = readcol(linelistfile, fsep='|', twod=False)
    lines[1] = asarray(lines[1],
                       dtype='float') * 1e4  # convert microns to angstroms

    specsegments = []
    for line in transpose(lines):
        wl = float(line[1])
        name = line[0]
        if wl > wlA.min() and wl < wlA.max() and regex.search(name) != None:
            closest = argmin(abs(wlA - wl))
            minind = closest - 7
            maxind = closest + 7

            if len(aperture) != 0:
                savedata = data[aperture[0]:aperture[1],
                                minind:maxind].sum(axis=0)
            else:
                savedata = data[:, minind:maxind]
            specsegments.append({
                'name': name,
                'apname': apname,
                'linewl': wl,
                'index': closest,
                'minind': minind,
                'maxind': maxind,
                'vlsrcorr': vlsrcorr,
                'wavelength': wlA[minind:maxind],
                'data': savedata,
                'noback': atmospec[minind:maxind],
                'err': errspec[minind:maxind],
                'model': data[0, minind:maxind] * 0
                #                'smoothdata':convolve( data[:,minind:maxind] , hanning(3) , 'same')
            })

    print "Done finding lines"

    return specsegments
Code Example #24
"""
Smooth the LVG models with distributions to get tau, then fit it with
optimization procedures.
"""

import numpy as np
import hopkins_pdf
import turbulent_pdfs
from turbulent_pdfs import lognormal_massweighted

try:
    from agpy import readcol
    radtab = readcol('radex_data/1-1_2-2_XH2CO=1e-9_troscompt.dat',asRecArray=True)
except ImportError:
    import astropy.table
    radtab = astropy.table.Table.read('radex_data/1-1_2-2_XH2CO=1e-9_troscompt.dat',format='ascii')

# plotting stuff
import pylab as pl
pl.rc('font',size=20)

_datacache = {}

def select_data(abundance=-8.5, opr=1, temperature=20, tolerance=0.1):
    key = (abundance, opr, temperature, tolerance)
    if key in _datacache:
        return _datacache[key]
    else:
        #tolerance = {-10:0.1, -9.5: 0.3, -9: 0.1, -8:0.1, -8.5: 0.3}[abundance]
        OKtem = radtab['Temperature'] == temperature
        OKopr = radtab['opr'] == opr
Code Example #25
File: speedcompare_plfit.py  Project: zhlijia/plfit
#import cplfit
import plfit
import time
from numpy.random import rand, seed
from numpy import unique, sort, array, asarray, log, sum, min, max, argmin, argmax, arange
import sys
import powerlaw
from agpy import readcol

try:
    ne = int(sys.argv[1])
    seed(1)
    X = plfit.plexp_inv(rand(ne), 1, 2.5)
    X[:100] = X[100:200]
except ValueError:
    X = readcol(sys.argv[1])

if len(sys.argv) > 2:
    discrete = bool(sys.argv[2])
else:
    discrete = None

print("Cython")
t1 = time.time()
p3 = plfit.plfit(X, discrete=discrete, usefortran=False, usecy=True)
print(time.time() - t1)
print("Fortran")
t1 = time.time()
p1 = plfit.plfit(X, discrete=discrete, usefortran=True)
print(time.time() - t1)
print("Numpy")
Code Example #26
def gridcube(filename,
             outfilename,
             var1="density",
             var2="column",
             var3="temperature",
             var4=None,
             plotvar="tau1",
             zerobads=True,
             ratio_type='flux',
             round=2):
    """
    Reads in a radex_grid.py generated .dat file and turns it into a .fits data cube.
    filename - input .dat filename
    outfilename - output data cube name
    var1/var2/var3 - which variable will be used along the x/y/z axis?
    plotvar - which variable will be the value in the data cube?
    zerobads - set inf/nan values in plotvar to be zero
    """

    names, props = readcol(filename, twod=False, names=True)
    if round:
        for ii, name in enumerate(names):
            if name in ('Temperature', 'log10(dens)', 'log10(col)', 'opr'):
                props[ii] = np.round(props[ii], round)
    if var4 is None:
        temperature, density, column, tex1, tex2, tau1, tau2, tline1, tline2, flux1, flux2 = props
    else:
        temperature, density, column, opr, tex1, tex2, tau1, tau2, tline1, tline2, flux1, flux2 = props
        opr = np.floor(opr * 100) / 100.
    if ratio_type == 'flux':
        ratio = flux1 / flux2
    else:
        ratio = tau1 / tau2

    vardict = {
        "temperature": temperature,
        "density": density,
        "column": column,
        "tex1": tex1,
        "tex2": tex2,
        "tau1": tau1,
        "tau2": tau2,
        "tline1": tline1,
        "tline2": tline2,
        "flux1": flux1,
        "flux2": flux2,
        "ratio": ratio,
    }
    if var4 is not None:
        vardict['opr'] = opr

    nx = len(unique(vardict[var1]))
    ny = len(unique(vardict[var2]))
    nz = len(unique(vardict[var3]))
    if var4 is not None:
        nw = len(unique(vardict[var4]))

    xarr = (unique(vardict[var1])
            )  #linspace(vardict[var1].min(),vardict[var1].max(),nx)
    yarr = (unique(vardict[var2])
            )  #linspace(vardict[var2].min(),vardict[var2].max(),ny)
    zarr = (unique(vardict[var3])
            )  #linspace(vardict[var2].min(),vardict[var2].max(),ny)
    if var4 is not None:
        warr = (unique(vardict[var4])
                )  #linspace(vardict[var2].min(),vardict[var2].max(),ny)

    if var4 is None:
        newarr = zeros([nz, ny, nx])
    else:
        newarr = zeros([nw, nz, ny, nx])
    print "Cube shape will be ", newarr.shape

    if zerobads:
        pv = vardict[plotvar]
        pv[pv != pv] = 0.0
        pv[isinf(pv)] = 0.0

    if var4 is None:
        for ival, val in enumerate(unique(vardict[var3])):
            varfilter = vardict[var3] == val
            #newarr[ival,:,:] = griddata((vardict[var1][varfilter]),(vardict[var2][varfilter]),vardict[plotvar][varfilter],xarr,yarr,interp='linear')
            newarr[ival, :, :] = interpolate.griddata(
                np.array([vardict[var1][varfilter],
                          vardict[var2][varfilter]]).T,
                vardict[plotvar][varfilter], tuple(np.meshgrid(xarr, yarr)))
    else:
        for ival4, val4 in enumerate(unique(vardict[var4])):
            for ival3, val3 in enumerate(unique(vardict[var3])):
                varfilter = (vardict[var3] == val3) * (vardict[var4] == val4)
                #newarr[ival4,ival3,:,:] = griddata((vardict[var1][varfilter]),(vardict[var2][varfilter]),vardict[plotvar][varfilter],xarr,yarr,interp='linear')
                if np.count_nonzero(varfilter) == 0:
                    warnings.warn(
                        "ERROR: There are no matches for {var3} == {val3} and {var4} == {val4}"
                        .format(val3=val3, val4=val4, var3=var3, var4=var4))
                    continue
                newarr[ival4, ival3, :, :] = interpolate.griddata(
                    np.array([
                        vardict[var1][varfilter], vardict[var2][varfilter]
                    ]).T, vardict[plotvar][varfilter],
                    tuple(np.meshgrid(xarr, yarr)))

    newfile = fits.PrimaryHDU(newarr)
    if var4 is not None:
        newfile.header.update('CRVAL4', (min(warr)))
        newfile.header.update('CRPIX4', 1)
        newfile.header.update('CTYPE4', 'NLIN-OPR')
        newfile.header.update('CDELT4', (unique(warr)[1]) - (unique(warr)[0]))
    newfile.header.update('BTYPE', plotvar)
    newfile.header.update('CRVAL3', (min(zarr)))
    newfile.header.update('CRPIX3', 1)
    if len(unique(zarr)) == 1:
        newfile.header.update('CTYPE3', 'ONE-TEMP')
        newfile.header.update('CDELT3', zarr[0])
    else:
        newfile.header.update('CTYPE3', 'LIN-TEMP')
        newfile.header.update('CDELT3', (unique(zarr)[1]) - (unique(zarr)[0]))
    newfile.header.update('CRVAL1', min(xarr))
    newfile.header.update('CRPIX1', 1)
    newfile.header.update('CD1_1', xarr[1] - xarr[0])
    newfile.header.update('CTYPE1', 'LOG-DENS')
    newfile.header.update('CRVAL2', min(yarr))
    newfile.header.update('CRPIX2', 1)
    newfile.header.update('CD2_2', yarr[1] - yarr[0])
    newfile.header.update('CTYPE2', 'LOG-COLU')
    newfile.writeto(outfilename, clobber=True)
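
A sketch of a call that exercises the four-dimensional (ortho/para ratio) branch of this version; the file names are placeholders:

gridcube('h2co_grid_opr.dat', 'h2co_ratio_cube.fits',
         var4='opr', plotvar='ratio', ratio_type='flux')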
Code Example #27
########################################################
# Started Logging At: 2012-09-06 10:26:32
########################################################

########################################################
# # Started Logging At: 2012-09-06 10:26:33
########################################################
import matplotlib
import numpy
import numpy as np
errstate = np.seterr(all="ignore")
import agpy
T = agpy.readcol('/Volumes/disk2/data/bgps/releases/v2.0/final/bolocat/bgps_v2.0_culled_ipac.txt',asStruct=True)
print T.glon_max[T.field=='gemob1']
T.field
print T.glon_max[T.field=='gemob1b']
print (T.glon_max[T.field=='gemob1b']-188)*3600
print (T.glon_max[T.field=='gemob1b']-188)*3600/7.2
print (T.glon_max[T.field=='gemob1b']-189)*3600/7.2
print (T.glon_max[T.field=='w5']-136)*3600/7.2
print (T.glon_max[T.field=='sh235']-136)*3600/7.2
print (T.glon_max[T.field=='sh235'])*3600/7.2 % 1
print ((T.glon_max[T.field=='sh235'])*3600/7.2 % 1)*10
print ((T.glon_max[T.field=='orionB'])*3600/7.2 % 1)*10
Code Example #28
"""
Smooth the LVG models with distributions to get tau, then fit it with
optimization procedures.
"""

import numpy as np
import hopkins_pdf
import turbulent_pdfs
from turbulent_pdfs import lognormal_massweighted

try:
    from agpy import readcol
    tablereader = lambda x: readcol(x, asRecArray=True)
except ImportError:
    import astropy.table
    tablereader = lambda x: astropy.table.Table.read(x, format='ascii')

import os
path = __file__
pwd = os.path.split(path)[0]

# plotting stuff
import pylab as pl
pl.rc('font',size=20)



class SmoothtauModels(object):

    def __init__(self, datafile=os.path.join(pwd,'radex_data/1-1_2-2_XH2CO=1e-9_troscompt.dat')):
        self.datafile = datafile
Code Example #29
File: plot_grids.py  Project: Fade89/agpy
def plot_radex(filename,ngridpts=100,ncontours=50,plottype='ratio',
        transition="noname",thirdvarname="Temperature",
        cutnumber=None,cutvalue=10,vmin=None,vmax=None,logscale=False,
        save=True,**kwargs):
    """
    Create contour plots in density/column, density/temperature, or column/temperature
    filename - Name of the .dat file generated by radex_grid.py
    ngridpts - number of points in grid to interpolate onto
    ncontours - number of contours / colors
    plottype - can be 'ratio','tau1','tau2','tex1','tex2'
    transition - The name of the transition, e.g. "1-1_2-2".  Only used for saving
    thirdvarname - Third variable, i.e. the one that will be cut through.  If you want
        a density/column plot, choose temperature
    cutnumber - Cut on the cutnumber value of thirdvar.  e.g., if there are 5 temperatures 
        [10,20,30,40,50] and you set cutnumber=3, a temperature of 40K will be used
    cutvalue - Cut on this value; procedure will fail if there are no columns with this value
    vmin - Can force vmin/vmax in plotting procedures
    vmax - Can force vmin/vmax in plotting procedures
    logscale - takes log10 of plotted value before contouring 
    save - save the figure as a png?
    """

    names,props = readcol(filename,twod=False,names=True)
    temperature,density,column,tex1,tex2,tau1,tau2,tline1,tline2,flux1,flux2 = props
    ratio = flux1 / flux2

    if thirdvarname == "Temperature":
      firstvar = density
      secondvar = column
      thirdvar = temperature
      if cutnumber is not None:
        cutvalue = unique(thirdvar)[int(cutnumber)]
      firstlabel = "log$(n_{H_2}) ($cm$^{-3})$"
      secondlabel = "log$(N_{H_2CO}) ($cm$^{-2})$"
      savetype = "DenCol_T=%iK" % cutvalue
      graphtitle = "T = %g K" % cutvalue
      firstvar = temperature
      secondvar = column
      thirdvar = density
      if cutnumber is not None:
        cutvalue = unique(thirdvar)[int(cutnumber)]
      firstlabel = "Temperature (K)"
      secondlabel = "log$(N_{H_2CO}) ($cm$^{-2})$"
      savetype = "TemCol_n=1e%gpercc" % cutvalue
      graphtitle = "n = %g cm$^{-3}$" % (10**cutvalue)
    elif thirdvarname == "Column":
      secondvar = density
      firstvar = temperature
      thirdvar = column
      if cutnumber is not None:
        cutvalue = unique(thirdvar)[int(cutnumber)]
      secondlabel = "log$(n_{H_2}) ($cm$^{-3})$"
      firstlabel = "Temperature (K)"
      savetype = "TemDen_N=1e%gpersc" % cutvalue
      graphtitle = "N = %g cm$^{-2}$" % (10**cutvalue)

    if plottype == 'ratio':
      cblabel = "$F_{1-1} / F_{2-2}$"
    elif plottype == 'tau1':
      cblabel = "$\\tau_{1-1}$"
    elif plottype == 'tau2':
      cblabel = "$\\tau_{2-2}$"
    elif plottype == 'tex1':
      cblabel = "$T_{ex}(1-1)$"
    elif plottype == 'tex2':
      cblabel = "$T_{ex}(2-2)$"

    varfilter = (thirdvar==cutvalue)
    if varfilter.sum() == 0:
      raise ValueError("Cut value %g does not match any of %s values" % (cutvalue, thirdvarname))

    nx = len(unique(firstvar))
    ny = len(unique(secondvar))
    if firstvar is temperature:
      firstarr = linspace((firstvar.min()),(firstvar.max()),nx)
    else:
      firstarr = linspace(firstvar.min(),firstvar.max(),nx)
    secondarr = linspace(secondvar.min(),secondvar.max(),ny)

    exec('plotdata = %s' % plottype)

    plot_grid = griddata(firstvar[varfilter],secondvar[varfilter],plotdata[varfilter],firstarr,secondarr)
    
    if vmax:
      plot_grid[plot_grid > vmax] = vmax
    if vmin:
      plot_grid[plot_grid < vmin] = vmin
    if logscale:
      plot_grid = log10(plot_grid)

    figure(1)
    clf()
    conlevs = logspace(-3,1,ncontours)
    contourf(firstarr,secondarr,plot_grid,conlevs,norm=matplotlib.colors.LogNorm())#,**kwargs) #,norm=asinh_norm.AsinhNorm(**kwargs),**kwargs)
    xlabel(firstlabel)
    ylabel(secondlabel)
    title(graphtitle)
    cb = colorbar()
    cb.set_label(cblabel)
    cb.set_ticks([1e-3,1e-2,1e-1,1,1e1])
    cb.set_ticklabels([1e-3,1e-2,1e-1,1,1e1])
    if save: savefig("%s_%s_%s.png" % (savetype,plottype,transition))
Code Example #30
#!/usr/bin/env python
# data files come from ftp://cdsarc.u-strasbg.fr/pub/cats/J/A+A/493/687/
from agpy import readcol
import numpy

h2co_oo = readcol('h2co_oo.dat',asStruct=True,comment='%',skipline=3)
h2co_op = readcol('h2co_op.dat',asStruct=True,comment='%',skipline=3)
levels = readcol('o-h2co_levels.dat',asStruct=True)

nlevelso = len(h2co_oo.Jl)
h2co_oo.__dict__['llevelnum'] = numpy.zeros(nlevelso)
h2co_oo.__dict__['ulevelnum'] = numpy.zeros(nlevelso)
nlevelsp = len(h2co_op.Jl)
h2co_op.__dict__['llevelnum'] = numpy.zeros(nlevelsp)
h2co_op.__dict__['ulevelnum'] = numpy.zeros(nlevelsp)

temperatures = numpy.arange(5,105,5)

def R(a0,a1,a2,a3,a4,T):
    """
    Troscompt et al (2009) coefficients using Faure et al (2004) equation:
    log10(R) = sum(a_n T^{-n/6})
    where n=0..4, R is presumably cm^3 s^-1
    """
    return a0 + a1*T**(-1./6.) + a2*T**(-2./6.) + a3*T**(-3./6.) + a4*T**(-4./6.)

outf = open('o-h2co_troscompt.dat','w')

print >>outf, "!MOLECULE"
print >>outf, "o-H2CO"
print >>outf, "!MOLECULAR WEIGHT"
Code Example #31
    pylab.plot(xarr,pylab.polyval(pf1,xarr),'b--',label='y=%0.2fx+%0.2f' % (pf1[0],pf1[1]))
    pylab.plot(xarr,pylab.polyval(pf2,xarr),'g:', label='y=%0.2fx+%0.2f' % (pf2[0],pf2[1]))
    pylab.legend(loc='best')
    pylab.savefig(savedir+'BGPS_correction_factors.png',bbox_inches='tight')

    pylab.figure(4)
    pylab.clf()
    pylab.hist(correction_factor_v1,bins=pylab.linspace(0.5,2.0,20),alpha=0.5,color='r')
    pylab.hist(correction_factor_v1[goodv1_lt2],bins=pylab.linspace(0.5,2.0,20),alpha=0.5,color='r')
    pylab.hist(correction_factor_v2,bins=pylab.linspace(0.5,2.0,20),alpha=0.5,color='b')
    pylab.hist(correction_factor_v2[goodv2_lt2],bins=pylab.linspace(0.5,2.0,20),alpha=0.5,color='b')
    pylab.xlabel("S(PPS)/S(BGPS) ``Correction Factor''")
    pylab.savefig(savedir+'BGPS_correction_factor_histograms.png',bbox_inches='tight')
"""

    dat = readcol(savedir + 'compare_plot_data.txt', asStruct=True)

    pf1 = pylab.polyfit(dat.PPS_120, dat.PPS_120 / dat.v10_120, 1)
    pf2 = pylab.polyfit(dat.PPS_120, dat.PPS_120 / dat.v20_120, 1)

    pylab.figure(3)
    pylab.clf()
    pylab.plot(dat.PPS_120, dat.PPS_120 / dat.v10_120, 'o', label='v1.0')
    pylab.plot(dat.PPS_120, dat.PPS_120 / dat.v20_120, 's', label='v2.0')
    pylab.xlabel('120" flux density in PPS (Jy)')
    pylab.ylabel('S(PPS)/S(BGPS) ``Correction Factor"')
    pylab.axis([0, 20, 0, 2])
    xarr = pylab.linspace(0, 20, 100)
    pylab.plot(xarr,
               pylab.polyval(pf1, xarr),
               'b--',
Code Example #32
def gridcube(filename, outfilename, var1="density", var2="column",
             var3="temperature", var4=None, plotvar="tau1", zerobads=True,
             ratio_type='flux', round=2):
    """
    Reads in a radex_grid.py generated .dat file and turns it into a .fits data cube.
    filename - input .dat filename
    outfilename - output data cube name
    var1/var2/var3 - which variable will be used along the x/y/z axis?
    plotvar - which variable will be the value in the data cube?
    zerobads - set inf/nan values in plotvar to be zero
    """

    names,props = readcol(filename,twod=False,names=True)
    if round:
        for ii,name in enumerate(names):
            if name in ('Temperature','log10(dens)','log10(col)','opr'):
                props[ii] = np.round(props[ii],round)
    if var4 is None:
        temperature,density,column,tex1,tex2,tau1,tau2,tline1,tline2,flux1,flux2 = props
    else:
        temperature,density,column,opr,tex1,tex2,tau1,tau2,tline1,tline2,flux1,flux2 = props
        opr = np.floor(opr*100)/100.
    if ratio_type == 'flux':
        ratio = flux1 / flux2
    else:
        ratio = tau1 / tau2

    vardict = {
      "temperature":temperature,
      "density":density,
      "column":column,
      "tex1":tex1,
      "tex2":tex2,
      "tau1":tau1,
      "tau2":tau2,
      "tline1":tline1,
      "tline2":tline2,
      "flux1":flux1,
      "flux2":flux2,
      "ratio":ratio,
      }
    if var4 is not None:
        vardict['opr'] = opr

    nx = len(unique(vardict[var1]))
    ny = len(unique(vardict[var2]))
    nz = len(unique(vardict[var3]))
    if var4 is not None:
        nw = len(unique(vardict[var4]))

    xarr = (unique(vardict[var1])) #linspace(vardict[var1].min(),vardict[var1].max(),nx)
    yarr = (unique(vardict[var2])) #linspace(vardict[var2].min(),vardict[var2].max(),ny)
    zarr = (unique(vardict[var3])) #linspace(vardict[var2].min(),vardict[var2].max(),ny)
    if var4 is not None:
        warr = (unique(vardict[var4])) #linspace(vardict[var2].min(),vardict[var2].max(),ny)

    if var4 is None:
        newarr = zeros([nz,ny,nx])
    else:
        newarr = zeros([nw,nz,ny,nx])
    print "Cube shape will be ",newarr.shape

    if zerobads:
        pv = vardict[plotvar]
        pv[pv!=pv] = 0.0
        pv[isinf(pv)] = 0.0

    if var4 is None:
        for ival,val in enumerate(unique(vardict[var3])):
          varfilter = vardict[var3]==val
          #newarr[ival,:,:] = griddata((vardict[var1][varfilter]),(vardict[var2][varfilter]),vardict[plotvar][varfilter],xarr,yarr,interp='linear')
          newarr[ival,:,:] = interpolate.griddata(np.array([ vardict[var1][varfilter],vardict[var2][varfilter] ]).T,
                                                  vardict[plotvar][varfilter],
                                                  tuple(np.meshgrid(xarr,yarr)) )
    else:
        for ival4,val4 in enumerate(unique(vardict[var4])):
            for ival3,val3 in enumerate(unique(vardict[var3])):
              varfilter = (vardict[var3]==val3) * (vardict[var4]==val4)
              #newarr[ival4,ival3,:,:] = griddata((vardict[var1][varfilter]),(vardict[var2][varfilter]),vardict[plotvar][varfilter],xarr,yarr,interp='linear')
              if np.count_nonzero(varfilter) == 0:
                  warnings.warn("ERROR: There are no matches for {var3} == {val3} and {var4} == {val4}".format(val3=val3, val4=val4, var3=var3, var4=var4))
                  continue
              newarr[ival4,ival3,:,:] = interpolate.griddata(np.array([ vardict[var1][varfilter],vardict[var2][varfilter] ]).T,
                                                             vardict[plotvar][varfilter],
                                                             tuple(np.meshgrid(xarr,yarr)) )

    newfile = fits.PrimaryHDU(newarr)
    if var4 is not None:
        newfile.header.update('CRVAL4' ,  (min(warr)) )
        newfile.header.update('CRPIX4' ,  1 )
        newfile.header.update('CTYPE4' ,  'NLIN-OPR' )
        newfile.header.update('CDELT4' , (unique(warr)[1]) - (unique(warr)[0]) )
    newfile.header.update('BTYPE' ,  plotvar )
    newfile.header.update('CRVAL3' ,  (min(zarr)) )
    newfile.header.update('CRPIX3' ,  1 )
    if len(unique(zarr)) == 1:
        newfile.header.update('CTYPE3' ,  'ONE-TEMP' )
        newfile.header.update('CDELT3' , zarr[0])
    else:
        newfile.header.update('CTYPE3' ,  'LIN-TEMP' )
        newfile.header.update('CDELT3' , (unique(zarr)[1]) - (unique(zarr)[0]) )
    newfile.header.update('CRVAL1' ,  min(xarr) )
    newfile.header.update('CRPIX1' ,  1 )
    newfile.header.update('CD1_1' , xarr[1]-xarr[0] )
    newfile.header.update('CTYPE1' ,  'LOG-DENS' )
    newfile.header.update('CRVAL2' ,  min(yarr) )
    newfile.header.update('CRPIX2' ,  1 )
    newfile.header.update('CD2_2' , yarr[1]-yarr[0] )
    newfile.header.update('CTYPE2' ,  'LOG-COLU' )
    newfile.writeto(outfilename,clobber=True)
Code Example #33
"""
import numpy as np


def nearest_source(x1, y1, x2, y2):
    d = np.sqrt((x1 - x2)**2 + (y1 - y2)**2)
    return d.argmin(), d.min()


import atpy
from agpy import readcol

bgps = atpy.Table('bgps_iras_langston_scuba_match.tbl', type='ascii')
magpis = readcol(
    '/Users/adam/work/catalogs/merge6_20.cat',
    fixedformat=[7, 7, 13, 13, 6, 9, 10, 8, 7, 7, 6, 5, 9, 10, 8, 9, 2],
    asStruct=True,
    skipline=10,
    nullval='          ')

dbest, bestmatch = np.zeros(len(bgps)), np.zeros(len(bgps))
for ii in xrange(len(bgps)):
    bestmatch[ii], dbest[ii] = nearest_source(bgps.glon_peak[ii],
                                              bgps.glat_peak[ii], magpis.Long,
                                              magpis.Lat)

bgps.add_column('Fint6', magpis.Fint6[bestmatch.astype('int')], unit="Jy/beam")
bgps.add_column('Fpeak6', magpis.Fpeak6[bestmatch.astype('int')], unit="Jy")
bgps.add_column('Fint20',
                magpis.Fint20[bestmatch.astype('int')],
                unit="Jy/beam")
bgps.add_column('Fpeak20', magpis.Fpeak20[bestmatch.astype('int')], unit="Jy")
Code Example #34
def gridcube(filename,
             outfilename,
             var1="density",
             var2="column",
             var3="temperature",
             var4=None,
             plotvar="tau1",
             zerobads=True):
    """
    Reads in a radex_grid.py generated .dat file and turns it into a .fits data cube.
    filename - input .dat filename
    outfilename - output data cube name
    var1/var2/var3 - which variable will be used along the x/y/z axis?
    plotvar - which variable will be the value in the data cube?
    zerobads - set inf/nan values in plotvar to be zero
    """

    names, props = readcol(filename, twod=False, names=True)
    if var4 is None:
        temperature, density, column, tex1, tex2, tau1, tau2, tline1, tline2, flux1, flux2 = props
    else:
        temperature, density, column, opr, tex1, tex2, tau1, tau2, tline1, tline2, flux1, flux2 = props
        opr = numpy.floor(opr * 100) / 100.
    ratio = tau1 / tau2

    vardict = {
        "temperature": temperature,
        "density": density,
        "column": column,
        "tex1": tex1,
        "tex2": tex2,
        "tau1": tau1,
        "tau2": tau2,
        "tline1": tline1,
        "tline2": tline2,
        "flux1": flux1,
        "flux2": flux2,
        "ratio": ratio,
        "opr": opr,
    }

    xarr = (unique(vardict[var1])
            )  #linspace(vardict[var1].min(),vardict[var1].max(),nx)
    yarr = (unique(vardict[var2])
            )  #linspace(vardict[var2].min(),vardict[var2].max(),ny)
    zarr = (unique(vardict[var3])
            )  #linspace(vardict[var2].min(),vardict[var2].max(),ny)

    nx = len(xarr)
    ny = len(yarr)
    nz = len(zarr)
    if var4 is not None:
        warr = (unique(vardict[var4])
                )  #linspace(vardict[var2].min(),vardict[var2].max(),ny)
        nw = len(warr)
        newarr = zeros([nw, nz, ny, nx])
        if nw != 11:
            import pdb
            pdb.set_trace()
    else:
        newarr = zeros([nz, ny, nx])

    print "Cube shape will be ", newarr.shape

    if zerobads:
        pv = vardict[plotvar]
        pv[pv != pv] = 0.0
        pv[isinf(pv)] = 0.0

    if var4 is None:
        for ival, val in enumerate(unique(vardict[var3])):
            varfilter = vardict[var3] == val
            newarr[ival, :, :] = griddata(
                (vardict[var1][varfilter]), (vardict[var2][varfilter]),
                vardict[plotvar][varfilter], xarr, yarr)
    else:
        for ival4, val4 in enumerate(warr):
            for ival3, val3 in enumerate(zarr):
                varfilter = (vardict[var3] == val3) * (vardict[var4] == val4)
                newarr[ival4, ival3, :, :] = griddata(
                    (vardict[var1][varfilter]), (vardict[var2][varfilter]),
                    vardict[plotvar][varfilter], xarr, yarr)

    newfile = pyfits.PrimaryHDU(newarr)
    if var4 is not None:
        newfile.header.update('CRVAL4', (min(warr)))
        newfile.header.update('CRPIX4', 1)
        newfile.header.update('CTYPE4', 'LOG--OPR')
        newfile.header.update('CDELT4', ((warr)[1]) - ((warr)[0]))
    newfile.header.update('BTYPE', plotvar)
    newfile.header.update('CRVAL3', (min(zarr)))
    newfile.header.update('CRPIX3', 1)
    newfile.header.update('CTYPE3', 'LIN-TEMP')
    newfile.header.update('CDELT3', ((zarr)[1]) - ((zarr)[0]))
    newfile.header.update('CRVAL1', min(xarr))
    newfile.header.update('CRPIX1', 1)
    newfile.header.update('CD1_1', xarr[1] - xarr[0])
    newfile.header.update('CTYPE1', 'LOG-DENS')
    newfile.header.update('CRVAL2', min(yarr))
    newfile.header.update('CRPIX2', 1)
    newfile.header.update('CD2_2', yarr[1] - yarr[0])
    newfile.header.update('CTYPE2', 'LOG-COLU')
    newfile.writeto(outfilename, clobber=True)