Example #1
def nova_plot():

	erg2mev=624151.

	fig=plot.figure()
	yrange = [1e-6,2e-4]
	xrange = [1e-1,1e5]
	plot.fill_between([0.2,10e3],[yrange[1],yrange[1]],[yrange[0],yrange[0]],facecolor='yellow',interpolate=True,color='yellow',alpha=0.5)
	plot.annotate('AMEGO',xy=(3,9e-5),xycoords='data',fontsize=26,color='black')

	lat=ascii.read("data/NMon2012.LAT.dat",names=['energy','en_low','en_high','flux','flux_err','tmp'])
	plot.scatter(lat['energy'],lat['flux']*erg2mev,color='red')
	plot.errorbar(lat['energy'],lat['flux']*erg2mev,xerr=[lat['en_low'],lat['en_high']],yerr=lat['flux_err']*erg2mev,ecolor='red',capsize=0,fmt='none')
	latul=ascii.read("data/NMon2012.LAT.limits.dat",names=['energy','en_low','en_high','flux','tmp1','tmp2','tmp3','tmp4'])
	plot.errorbar(latul['energy'],latul['flux']*erg2mev,xerr=[latul['en_low'],latul['en_high']],yerr=0.5*latul['flux']*erg2mev,uplims=True,ecolor='red',capsize=0,fmt='none')
	plot.scatter(latul['energy'],latul['flux']*erg2mev,color='red')

	leptonic=ascii.read("data/sp-NMon12-IC-best-fit-1MeV-30GeV.txt",names=['energy','flux'],data_start=1)
	hadronic=ascii.read("data/sp-NMon12-pi0-and-secondaries.txt",names=['energy','flux1','flux2'],data_start=1)	

	plot.plot(leptonic['energy'],leptonic['flux']*erg2mev,'--',color='black',lw=2,label='Leptonic')
	plot.plot(hadronic['energy'],hadronic['flux2']*erg2mev,color='black',lw=2,label='Hadronic+Secondary Leptons')

	plot.legend(loc='upper right',fontsize='small',frameon=False,framealpha=0.5)
	plot.xscale('log')
	plot.yscale('log')
	plot.ylim(yrange)
	plot.xlim(xrange)
	plot.xlabel(r'Energy (MeV)')
	plot.ylabel(r'Energy$^2 \times $ Flux (Energy) (erg cm$^{-2}$ s$^{-1}$)')
	plot.title('Nova V339 Del 2013')
	plot.savefig('Nova_SED.png', bbox_inches='tight')
	plot.savefig('Nova_SED.eps', bbox_inches='tight')
	plot.show()
	plot.close()
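The snippet above assumes matplotlib's pyplot is bound to the name plot and astropy's ascii reader is in scope; a minimal preamble (not part of the original) would be:

import matplotlib.pyplot as plot
from astropy.io import ascii

nova_plot()  # writes Nova_SED.png and Nova_SED.eps, reading from data/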
Example #2
    def read_llist(self, llist, fmt=0, set_unit='AA', silent=False):
        '''
        fmt: int (0)
           Format of line list. Follows XIDL formatting.
           0: Standard absorption lines
           1: Galaxy/Quasar lines
        set_unit: string ('AA')
           Set units of wavelength. Use None to avoid setting a unit.
        '''

        # Path + Format
        gdfil,fmt = llist_file(llist)
        if not silent:
            print('gdfil = {:s}, fmt={:d}'.format(gdfil,fmt))

        if fmt == 0:
            # Read Absorption Lines with Fixed Format (astropy Table)
            self.data = ascii.read(gdfil, format='fixed_width_no_header',data_start=1,
                            names=('wrest', 'name', 'fval'),
                            col_starts=(0,9,22), col_ends=(8,20,32))
        elif fmt == 1:
            self.data = ascii.read(gdfil, format='fixed_width_no_header',data_start=1,
                            names=('wrest', 'flg', 'name'),
                            col_starts=(0,10,13), col_ends=(8,11,23))
        # Specify Units
        if set_unit is not None:
            self.data['wrest'].unit = u.Unit(set_unit)
        return
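The crux here is the fixed-width read. A self-contained sketch with an inline three-row table (values illustrative, not taken from a real line list) shows the same col_starts/col_ends slicing:

from astropy.io import ascii

demo = ["  wrest  name         fval",
        " 1215.67 HI 1215     0.4164",
        " 1025.72 HI 1025     0.0791"]
tab = ascii.read(demo, format='fixed_width_no_header', data_start=1,
                 names=('wrest', 'name', 'fval'),
                 col_starts=(0, 9, 22), col_ends=(8, 20, 32))
print(tab['wrest'][0])  # 1215.67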
Example #3
def read_table(infile, colname):
   """
   This new code can be used to read both the old-school text files (if they
    had the two-column format - see help for the read_text function) but
    also (and more importantly) the information directly from a CSV table
    of the form that is exported from smartsite or canvas.

   Inputs:
    infile  - input file name
    colname - the name of the column containing the score of interest.
              NOTE: for the old-school text files, this will be 'col2'
              while for the CSV files it could be something like
              'Midterm 2 (32620)' or 'MT2' or 'Final Score'
   """

   """ Read in the table """
   try:
      tab = ascii.read(infile, guess=False, format='csv')
   except Exception:
      tab = ascii.read(infile)
   print(tab.colnames)

   """ Get the relevant information """
   try:
      tot = tab[colname].copy()
   except KeyError:
      print('')
      print('Could not find a column matching %s in %s' % (colname, infile))
      tot = None
   return tot
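A quick check of the CSV path, using an in-memory table in place of a file (the column name is illustrative):

from astropy.io import ascii

tab = ascii.read(["name,MT2", "alice,88", "bob,91"], format='csv', guess=False)
tot = tab['MT2'].copy()  # the same column extraction read_table performs
print(tot.mean())        # 89.5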
Example #4
	def __init__(self, ob_name):
		self.ob_name = ob_name
		self.dataset_definition = os.getenv("XDIR")+'/dataset_definition/'+self.ob_name+'.txt'
		d=ascii.read(self.dataset_definition)
		self.night=d['night'][0]
		ob_name_list=[d['object'][0],d['telluric'][0],d['flux'][0]]
		self.arms=["NIR","VIS","UVB"]
		self.dprlist=["SCI","TELL","FLUX"]
	
#		self.dataset = Table(names=('dprtype', 'ob_name', 'arm', 'dpid'), dtype=('a10', 'a20', 'a3', 'a29'), meta={'night': self.night})
		self.dataset = Table(names=('dprtype', 'ob_name', 'arm', 'dpid'), dtype=(np.dtype((str,10)), np.dtype((str,20)), np.dtype((str,3)), np.dtype((str,29))), meta={'night': self.night})

		conn=sqlite3.connect(os.getenv("OBSDB"))
		c=conn.cursor()
	
		for arm in self.arms:
			f_arm=self.dataset_definition.split('.txt')[0]+'_'+arm+'.txt'
			if os.path.isfile(f_arm):
				d_arm=ascii.read(f_arm,data_start=0,names=["ob","dpid"])
				for ob,dpid,dpr in zip(d_arm["ob"],d_arm["dpid"],self.dprlist):
					self.dataset.add_row([dpr,ob,arm,dpid])
			else:
				with open(f_arm,'w') as f:
					for ob,dpr in zip(ob_name_list,self.dprlist):
						query = "select arcfile from shoot where night=\"" + self.night + \
							"\" and ob_name=\""+ ob + \
							"\" and arm=\"" + arm + \
							"\" and opti2_name=\"IFU\" limit 1;"
						c.execute(query)
						dpid=c.fetchone()
						self.dataset.add_row([dpr,ob,arm,dpid])
						file_string=ob+" "+dpid[0]+"\n"
						f.write(file_string)
Example #5
def s2n(t, FUV, tnorm=1000):
    # load cos etc simulations for flat spectrum 1000s exp, and FUV = 17 mag
    cos_g130m = ascii.read('cos_etc_g130m_v24.1.csv')
    cos_g160m = ascii.read('cos_etc_g160m_v24.1.csv')
    #separate them at 1400 A
    cond = cos_g130m['wavelength'] < 1405
    cos_g130m = cos_g130m[cond]
    cond = cos_g160m['wavelength'] >= 1405
    cos_g160m = cos_g160m[cond]
    #merge both
    cos = vstack([cos_g130m,cos_g160m], join_type='exact')
    
    # Signal
    signal = cos['target_counts']*t/tnorm * 10.**((FUV-17.)/(-2.5))
    
    # Noise terms
    dark = cos['dark_counts']*t/tnorm
    sky = cos['sky_counts']*t/tnorm
        
    # Noise
    var = signal + dark + sky
    sig = np.sqrt(var)
    
    #append S/N to cos
    sn = signal/sig * np.sqrt(6) # per-resolution element of 6 pixels
    cos.add_column(Column(name='sn', data=sn))
    return cos
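The noise model is plain Poisson: var = signal + dark + sky, and every term is linear in t, so S/N grows as sqrt(t). A table-free sanity check with assumed per-pixel counts (the 100/5/2 counts are made up):

import numpy as np

t, tnorm, FUV = 2000., 1000., 18.0
scale = t / tnorm * 10.**((FUV - 17.) / (-2.5))  # same exposure/brightness scaling as above
signal = 100. * scale                            # assumed target counts at t=tnorm, FUV=17
dark, sky = 5. * t / tnorm, 2. * t / tnorm       # assumed background counts
print(signal / np.sqrt(signal + dark + sky) * np.sqrt(6))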
Example #6
def plot_mpl_fig():
    datax = np.zeros(46)
    datay = np.zeros(46)
    halonames = np.zeros(0)
    rootdir = r'C:\Users\Cat\Documents\Research_Halos\HaloDetail'
    for subdir, dirs, files in os.walk(rootdir):
        head,tail = os.path.split(subdir)
        haloname = tail
        for file in files:
            if file.endswith('.list'):
                #print os.path.join(subdir, file)
                oldvalues = ascii.read(os.path.join(subdir, file), format = 'commented_header') #Get full path and access file
                maxvmax=max(oldvalues['vmax(16)'])                                             #Find the highest mass in each file 
            elif file.endswith('_subhalos'):
                #print os.path.join(subdir, file)
                newvalues = ascii.read(os.path.join(subdir, file), format = 'commented_header') #Get full path and access file
                sorted_vmax = np.array(sorted(newvalues['vmax(16)']))         
                vfrac = sorted_vmax/maxvmax
                index = np.arange(len(sorted_vmax))
                rindex = len(index)-index
                datax = np.append(datax, vfrac)
                datay = np.append(datay, rindex)
                plt.loglog(vfrac, rindex, alpha=0.8,label=haloname)
                halonames = np.append(halonames, haloname)
        print "%s done. On to the next." %haloname
Example #7
def main():

    filepath = cu.get_data_path() + 'Berlind_groupcat/'
    savepath = cu.get_output_path() + 'processed_data/berlind_groupcat/'
    #################################################################

    catalogues=['mr19_groups.fits', 'smthresh10.2.groups.dat', 'smthresh9.8.groups.dat']

    filename = catalogues[0]
    hdulist = fits.open(filepath+filename, memmap=True)
    data = hdulist[1].data
    print('saving as:', savepath+filename[:-5]+'.hdf5')
    f = h5py.File(savepath+filename[:-5]+'.hdf5', 'w')
    dset = f.create_dataset(filename[:-5], data=data)
    f.close()
    gc.collect()

    names = ['ra','dec','z','groupID','rank','Mstar','Mr','SSFR','Mgroup']

    filename = catalogues[1]
    data = ascii.read(filepath+filename, guess=True, Reader=ascii.Basic, names=names,data_start=0)
    print('saving as:', savepath+filename[:-4]+'.hdf5')
    f = h5py.File(savepath+filename[:-4]+'.hdf5', 'w')
    dset = f.create_dataset(filename[:-4], data=data)
    f.close()
    gc.collect()

    filename = catalogues[2]
    data = ascii.read(filepath+filename, guess=True, Reader=ascii.Basic, names=names,data_start=0)
    print('saving as:', savepath+filename[:-4]+'.hdf5')
    f = h5py.File(savepath+filename[:-4]+'.hdf5', 'w')
    dset = f.create_dataset(filename[:-4], data=data)
    f.close()
    gc.collect()
Example #8
    def observed_fetch(self, table=None, red=True, blu=True):
        """Read a fields_observed.dat file and save the contents.

        Arguments to be passed:
            table (default=None) : path of fields_observed file - default
                                   behaviour is to look in package data 
                                   directory for fields_observed.dat.
            red (default=True)   : boolean which when true specifies that
                                   details of the red concats should be read.
            blu (default=True)   : boolean which when true specifies that
                                   details of the blue concats should be read.
        """
        # Ensure date columns always read in as strings - makes it 
        # possible to validate values that are empty against "".
        conv = {"Hari_dat" : [ascii.convert_numpy(np.str)],
                "ugr_dat" : [ascii.convert_numpy(np.str)]}
        if table is None:
            try:
                self.observed = ascii.read(datadir+'/fields_observed.dat', 
                                        converters=conv)
            except Exception:
                print("Error: No table specified, and no "
                      "fields_observed.dat")
                print("       in module data directory.")
                return 0
        else:
            self.observed = ascii.read(table, converters=conv)
        self.bool_red = red
        self.bool_blu = blu
        return
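The converters mapping is what keeps empty date fields readable as "" rather than masked numbers; a minimal illustration with inline data (column names taken from the code above, values made up):

from astropy.io import ascii

conv = {"Hari_dat": [ascii.convert_numpy(str)],
        "ugr_dat": [ascii.convert_numpy(str)]}
t = ascii.read(["Hari_dat ugr_dat", '20130101 ""'], converters=conv)
print(t['ugr_dat'][0] == "")  # True: the empty value survives as a string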
Example #9
def phot_filter(fil_name, filter_dir):
    import numpy as np
    from astropy.io import ascii
    from pprint import pprint
    import os

    filename = filter_dir+'allfilters.dat'

    foo = open(filename, 'r')
    index = []
    filter_name = []
    for i, line in enumerate(foo):
        if line[0] == '#':
            index.append(i)
            filter_name.append(line.split('#')[1].lstrip().rstrip())
    index = np.array(index)
    filter_name = np.array(filter_name)

    ind, = np.where(filter_name == fil_name)

    while len(ind) == 0:
        if fil_name != 'ls':
            print('requested filter not found in database!')
            fil_name = input('Please enter the filter name (or ls for listing the filters in database): ')
        if fil_name == 'ls':
            pprint(filter_name)
            fil_name = input('Please enter the filter name (or ls for listing the filters in database): ')
        ind, =np.where(filter_name == fil_name)
    if fil_name != filter_name[-1]:
        phot_filter = ascii.read(filename, data_start=index[ind]-ind, data_end=index[ind+1]-ind-1, names=['wave','transmission'],header_start=None)
    else:
        phot_filter = ascii.read(filename, data_start=index[ind]-ind, names=['wave','transmission'],header_start=None)

    return phot_filter
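Typical use, assuming an FSPS-style allfilters.dat lives under filter_dir (the filter name below is illustrative, not guaranteed to be in the database):

# band = phot_filter('2MASS_J', '/path/to/filters/')
# plt.plot(band['wave'], band['transmission'])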
Example #10
def s2n_COS(t, FUV, tnorm=1000, v="27.1"):
    # load cos etc simulations for flat spectrum 1000s exp, and FUV = 17 mag
    if v == '26.1':
        cos_g130m = ascii.read(data_path('cos_etc_g130m_v26.1.csv'))
        cos_g160m = ascii.read(data_path('cos_etc_g160m_v26.1.csv'))
    elif v=='27.1':
        cos_g130m = ascii.read(data_path('cos_etc_g130m_v27.1.csv'))
        cos_g160m = ascii.read(data_path('cos_etc_g160m_v27.1.csv'))
    else:
        raise ValueError("The requested version is not implemented or is out of date.")

    #separate them at ~1450 A
    cond = cos_g130m['wavelength'] < 1440
    cos_g130m = cos_g130m[cond]
    cond = cos_g160m['wavelength'] >= 1440
    cos_g160m = cos_g160m[cond]
    # merge both
    cos = vstack([cos_g130m, cos_g160m], join_type='exact')

    # Signal
    signal = cos['target_counts'] * t / tnorm * 10.**((FUV- 17.)/(-2.5))

    # Noise terms
    dark = cos['dark_counts'] * t / tnorm
    sky = cos['sky_counts'] * t / tnorm

    # Noise
    var = signal + dark + sky
    sig = np.sqrt(var)

    #append S/N to cos
    sn = signal/sig * np.sqrt(6) # per-resolution element of 6 pixels
    cos['sn'] = sn
    return cos
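Same pattern as the s2n helper in Example #5, with the ETC version selectable. A hypothetical call, assuming the CSV tables resolve via data_path:

# cos = s2n_COS(2000., 18., v='27.1')
# bright_enough = cos[cos['sn'] > 10]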
Example #11
def add_stars(slit_num, ax_stis, ax_wfc3, im_stis, im_wfc3, cenwave):
    '''
    Ask whether any stars need to be added to this slit.
    '''
    tbdata_current = ascii.read(get_filename(slit_num, cenwave))
    if slit_num == 1:
        tbdata_previous = None
        tbdata_next = ascii.read(get_filename(slit_num+1, cenwave))
    elif slit_num == 17:
        tbdata_next = None
        tbdata_previous = ascii.read(get_filename(slit_num -1, cenwave))    
    else:
        tbdata_previous = ascii.read(get_filename(slit_num -1, cenwave))    
        tbdata_next = ascii.read(get_filename(slit_num+1, cenwave))
    ax_stis.set_ylim(900, 1040)
    ax_wfc3.set_ylim(900, 1040)
    pyplot.draw()
    to_do_flag = input('Would you like to adjust the contrast (c), enter a star to be added to this slit (a), move down the slit (m), or move to next slit (q) ? ')
    while to_do_flag != 'q':
        if to_do_flag == 'c':
            set_contrast(im_stis)
            set_contrast(im_wfc3)
        elif to_do_flag == 'a':
            tbdata_current = add_star_to_slit(slit_num, tbdata_current, tbdata_previous, tbdata_next)
        elif to_do_flag == 'm':
            ax_stis = move_down_slit(ax_stis)
            ax_wfc3 = move_down_slit(ax_wfc3)
        to_do_flag = input('Would you like to adjust the contrast (c), enter a star to be added to this slit (a), move down the slit (m), or finish (q) ? ')
    if slit_num < 10:
        shutil.copyfile('slit0{}_{}_phot.dat'.format(int(slit_num), cenwave), 'slit{}_{}_phot_no_split.dat'.format(int(slit_num), cenwave))
        ascii.write(tbdata_current, 'slit0{}_{}_phot.dat'.format(int(slit_num), cenwave))
    else:
        shutil.copyfile('slit{}_{}_phot.dat'.format(int(slit_num), cenwave), 'slit{}_{}_phot_no_split.dat'.format(int(slit_num), cenwave))
        ascii.write(tbdata_current, 'slit{}_{}_phot.dat'.format(int(slit_num), cenwave))
Example #12
def read_massey(catalog_name = None, spec_types = True, spec_catalog = None, **extras):
    """Read in data from Massey 2002, by default only include stars with spectral types."""

    table = ascii.read(catalog_name)
    if spec_types is True: #restrict to stars spectrally typed
        sptable = ascii.read(spec_catalog)
        table = table[sptable['CNum']-1]  

    names = ['RAh','Dec','bessell_U','bessell_U_unc','bessell_B','bessell_B_unc',
             'bessell_V','bessell_V_unc','bessell_R','bessell_R_unc','spType', 'r_spType']
    formats = ['<f8','<f8','<f8','<f8','<f8','<f8','<f8','<f8','<f8','<f8','a12','a8']
    newt = np.zeros(len(table), dtype=np.dtype(list(zip(names, formats))))

    newt['RAh'] = (table['RAh'] + table['RAm']/60. + table['RAs']/3600.)
    newt['Dec'] = (-1)*(table['DEd'] + table['DEm']/60. + table['DEs']/3600.)
    newt['bessell_V'] = table['Vmag']
    newt['bessell_B'] = table['Vmag'] + table['B-V']
    newt['bessell_U'] = table['Vmag'] + table['B-V'] + table['U-B']
    newt['bessell_R'] = table['Vmag'] - table['V-R']
    newt['bessell_V_unc'] = table['e_Vmag']
    newt['bessell_B_unc'] = np.sqrt(table['e_Vmag']**2 + table['e_B-V']**2) #use np.hypot
    newt['bessell_U_unc'] = np.sqrt(table['e_Vmag']**2 + table['e_B-V']**2 + table['e_U-B']**2)
    newt['bessell_R_unc'] = np.sqrt(table['e_Vmag']**2 + table['e_V-R']**2)

    if spec_types is True:
        newt['spType'] = sptable['SpType']
        newt['r_spType'] = sptable['r_SpType']
    else:
        newt['spType'][sptable['CNum']-1] = sptable['SpType']
        newt['r_spType'][sptable['CNum']-1] = sptable['r_SpType']

    return newt
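As the inline note above suggests, the quadrature sums can be written with np.hypot, which also chains for the three-term case; equivalent forms:

# newt['bessell_B_unc'] = np.hypot(table['e_Vmag'], table['e_B-V'])
# newt['bessell_U_unc'] = np.hypot(np.hypot(table['e_Vmag'], table['e_B-V']), table['e_U-B'])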
Example #13
def mkTvisFunctions(etime=20, filter1='f140w', filter2='f110w'):
    """Read in simulations, build 2-D interpolators and return them as
    two functions that give the expected mean visibility time for a Ia
    and a CC at a given  (z,mu) position.
    """
    import os
    from scipy import interpolate as scint
    from astropy.io import ascii

    tvisdatfile1 = 'snIa_%s_%02imin_tvis.dat'%(filter1,etime)
    tvisdatfile2 = 'snII_%s_%02imin_tvis.dat'%(filter2,etime)

    interpolators = []
    for tvisdatfile in [tvisdatfile1,tvisdatfile2] : 
        tvisdat = ascii.read( tvisdatfile, header_start=-1 )
        zgrid = tvisdat['z']
        mugrid =  np.array([ int(col[-2:]) for col in tvisdat.colnames
                             if col.startswith('tvis') ] )
        tvisgrid = np.array( [ tvisdat[col] for col in tvisdat.colnames
                               if col.startswith('tvis') ] )
        tvisinterp = scint.interp2d( zgrid, mugrid, tvisgrid, bounds_error=False,
                                     fill_value=None ) # extrapolate!
        interpolators.append( tvisinterp )
    return( interpolators )
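The two returned interpolators are plain callables over (z, mu). Hypothetical usage, assuming the tvis .dat tables are present:

# tvisIa, tvisCC = mkTvisFunctions(etime=20)
# print(tvisIa(1.8, 10))  # expected mean visibility time (days) for a Ia at z=1.8, mu=10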
Example #14
    def __init__(self, definition_file):
        self._dqcol = 'DQFLAG'
        self._sdcol = 'short'  # SHORT_DESCRIPTION
        self._ldcol = 'long'   # LONG_DESCRIPTION

        # Need to replace ~ with $HOME
        self.tab = ascii.read(
            os.path.expanduser(definition_file),
            names = (self._dqcol, self._sdcol, self._ldcol),
            converters = {self._dqcol: [ascii.convert_numpy(np.uint16)],
                          self._sdcol: [ascii.convert_numpy(str)],
                          self._ldcol: [ascii.convert_numpy(str)]})

        # Another table to store metadata
        self.metadata = ascii.read(self.tab.meta['comments'], delimiter='=',
                                   format='no_header', names=['key', 'val'])

        # Ensure table has OK flag to detect good pixel
        self._okflag = 0
        if self._okflag not in self.tab[self._dqcol]:
            self.tab.add_row([self._okflag, 'OK', 'Good pixel'])

        # Sort table in ascending order
        self.tab.sort(self._dqcol)

        # Compile a list of flags
        self._valid_flags = self.tab[self._dqcol]
Example #15
def get_host_ellipses( sn='colfax', cat1=None, rclose=10 ):
    """
    get ellipse parameters for all sources within <rclose> arcsec
    :return:
    """
    import numpy as np
    from astropy.io import ascii

    if sn == 'colfax' : 
        xSN, ySN = xColfax, yColfax
    elif sn == 'stone' : 
        xSN, ySN = xStone, yStone
    elif sn == 'bush' : 
        xSN, ySN = xBush, yBush
        raSN,decSN = 53.178232, -27.801973 

    if cat1 is None and sn in ['colfax','stone']:
        cat1 = ascii.read('/store/goods-n/catalogs/gn_all_candels_sx_h_130829_hphotom_comb_merge_psfmatch2h.cat')

    elif cat1 is None and sn in ['bush']:
        cat1 = ascii.read('/store/goods-s/catalogs/gs_all_sx_jh_120605_hphotom_comb_merge_psfmatch2h.cat')

    ra = cat1['ALPHA_J2000']
    dec = cat1['DELTA_J2000']

    x = cat1['X_IMAGE']
    y = cat1['Y_IMAGE']

    dist = np.sqrt( ((ra-raSN)*np.cos(np.deg2rad(dec))) **2 + (dec-decSN)**2 ) * 3600.
    iclose = np.where( dist< rclose )[0]

    if len(iclose)<1: 
        import pdb; pdb.set_trace()

    print("#idx           RA        Dec          d          x          y    theta        A       B       R")

    for i in iclose :
        # extract the ellipse parameters :

        Cxx = cat1['CXX_IMAGE'][i]
        Cyy = cat1['CYY_IMAGE'][i]
        Cxy = cat1['CXY_IMAGE'][i]
        xgal = x[i]
        ygal = y[i]

        ragal = ra[i]
        decgal = dec[i]

        xr = xSN - xgal
        yr = ySN - ygal
        R = np.sqrt(  Cxx*xr**2 + Cyy*yr**2 + Cxy*xr*yr )
        theta = cat1['THETA_IMAGE'][i]
        A = cat1['A_IMAGE'][i]
        B = cat1['B_IMAGE'][i]
        darcsec = dist[i]

        print( "%5i %11.6f %10.6f %10.3f %10.2f %10.2f %8.3f %8.3f %8.3f %6.2f"%( i,ragal,decgal,darcsec,xgal,ygal,theta,A,B,R) )
Example #16
    def __init__(self, base_dir=""):
        self.harris_tableI = ascii.read(base_dir + "mwgc.dat",
                                        data_start=48, data_end=205,
                                        delimiter=' ', guess=False,
                                        format="fixed_width_no_header",
                                        names=("ID", "Name", "RA", "Dec", "l",
                                               "b", "R_sun", "R_gc", "X", "Y",
                                               "Z"),
                                        col_starts=(0,10,23,37,50,59,67,74,80,86,92),
                                        col_ends=(9,22,36,49,58,66,73,79,85,91,97))

        self.harris_tableII = ascii.read(base_dir + "mwgc.dat", data_start=221,
                                         data_end=378, delimiter=' ',
                                         format="fixed_width_no_header",
                                         names=("ID", "[Fe/H]", "[Fe/H]wt",
                                                "EBV", "V_HB", "(m-M)V", "V_t",
                                                "M_V,t", "U-B", "B-V", "V-R",
                                                "V-I", "spt", "ellip"),
                                         col_starts=(0,12,19,22,29,35,41,47,54,61,67,
                                                     73,79,83),
                                         col_ends=(11,18,21,28,34,40,46,53,60,66,
                                                   72,78,82,90))

        self.harris_tableIII = ascii.read(base_dir + "mwgc.dat", data_start=395,
                                          data_end=552, delimiter=' ',
                                          format="fixed_width_no_header",
                                          names=("ID", "v_r", "v_r_pm", "v_LSR",
                                                 "sig_v", "sig_v_pm", "c",
                                                 "r_c_flag", "r_c", "r_h",
                                                 "mu_V", "rho_0", "lg(tc)",
                                                 "lg(th)"),
                                          col_starts=(0,10,19, 25,33,41,47,54,
                                                      58,64,70,78,83,90),
                                          col_ends=(9,18,24,32,40,46,53,57,63,
                                                    69,77,82,89,96))


        self.names = self.harris_tableII['ID']
        self.names2 = self.harris_tableI['Name']
        self.M_V = self.harris_tableII['M_V,t']

        self.r_h_arcmin = self.harris_tableIII['r_h']
        # Physical units
        #self.dist = 10**(0.2*self.harris_tableI['R_sun'])/100.0
        self.dist = self.harris_tableI['R_sun']

        self.r_h = self.r_h_arcmin*pi/(60*180.0)*self.dist*1000.0
        self.glat = self.harris_tableI['b']
        self.glon = self.harris_tableI['l']

        coords = SkyCoord(self.glon, self.glat, frame="galactic",
                          unit=(u.deg, u.deg))
        equ_coords = coords.transform_to('icrs')

        self.ra = equ_coords.ra
        self.dec = equ_coords.dec
Example #17
def plot_tvis_lines(snIadatfile='snIa_tvis.dat', snIIdatfile='snII_tvis.dat'):
    """ Plot SN visibility time vs redshift for a range of mu values using
    .dat tables generated by snapsim.py
    """
    dat1 = ascii.read( snIadatfile, header_start=-1 )
    dat2 = ascii.read( snIIdatfile, header_start=-1 )

    mulist = [2,4,6,10,15,20]
    mucolorlist = ['m','b','c','g','r','k']

    pl.clf()
    ax1 = pl.subplot( 1,2,1 )
    ax2 = pl.subplot( 1,2,2, sharex=ax1, sharey=ax1 )

    for dat,ax,sntype in zip( [dat1,dat2], [ax1,ax2], ['Ia','II']) :

        z = np.array( dat['z'] )
        for mu, mucolor in zip(mulist,mucolorlist) :
            tvis = np.array( dat['tvis%02i'%mu] )
            err = np.array( dat['err%02i'%mu] )
            tvismax = tvis + err
            tvismin = np.max( [np.zeros(len(tvis)), tvis-err], axis=0 )
            # ax.fill_between( z, tvismin, tvismax, color=mucolor, alpha=0.3 )
            ax.plot( z, tvis, marker=' ', ls='-', color=mucolor, label='%i'%mu )
            z10 = z[ np.where(tvis<12)[0][0] ]
            ax.text( z10,10, '%i'%mu, color=mucolor, ha='center', va='center',
                     backgroundcolor='w' )

        # ax.legend(loc='upper right')
        ax.set_xlabel('Redshift')
        ax.set_ylabel('Visibility Time [days]')
        ax.text(0.95,0.95,'Type %s SN'%sntype, ha='right',va='top',
                transform=ax.transAxes, fontsize='large' )
        ax.set_ylim( 0, 140 )
        ax.set_xlim( 0.8, 3.2 )
        ax.text(1.0,10,r'$\mu$=',ha='right',va='center',
                backgroundcolor='w' )

    fig = pl.gcf()
    fig.subplots_adjust( left=0.12, right=0.88, bottom=0.12, top=0.95, wspace=0 )

    ax2.yaxis.set_ticks_position('right')
    ax2.yaxis.set_label_position('right')
    ax2.yaxis.set_ticks_position('both')
    ax2.set_ylabel('Visibility Time [years]', rotation=-90 )

    # ax1.set_xlim(0.9,3.2)

    ax2.set_yticks( np.array([0.1,0.2,0.3])*365 )

    ax2.set_yticklabels( [0.1,0.2,0.3] )

    ax1.set_ylim(0,120)

    return( dat1, dat2 )
Example #18
def mainCommand():
    while True:    
        check_sent = glob.glob("./sent_list.*")
        if check_sent!=[]:
            sent_already = ascii.read("./sent_list.dat")
        else:
            sent_already = np.array([])

        if (os.path.isfile("./trigger.dat")):
            trigger_input = ascii.read("./trigger.dat", guess=False)
            if len(trigger_input['mac']) > 0:
                additionalCondition = 1
                time.sleep(1)
                if len(sent_already["mac"]) != 0:
                    try:
                        trig_unique_1 = np.array([x if ((x not in sent_already['mac']) | ((x in sent_already['mac']) and (np.max(sent_already['sent_time'][sent_already['mac'] == x]) < calendar.timegm(time.gmtime()) - 10*60))) else "-99" for x in trigger_input['mac']])
                        trig_unique_2 = trig_unique_1[trig_unique_1 != "-99"]
                        print("Success")
                        print(trig_unique_2)
                        additionalCondition = 0
                    except Exception as e:
                        print(str(e))
                        trig_unique_2 = np.array([])
                        print(trig_unique_2)
                        print("Pass3-Failed")
                        # print(trig_unique_2)
                else:
                    trig_unique_1 = trigger_input['mac']
                    trig_unique_2 = trig_unique_1
                    print("Initializing")
                    additionalCondition = 0

                if len(trig_unique_2)>0:
                    outbound_macs = np.array([])
                    outbound_time = np.array([])

                    for x in trig_unique_2:
                        if x not in restricted_list and additionalCondition == 0:
                            outbound_macs = np.append(outbound_macs, x)
                            outbound_time = np.append(outbound_time, (calendar.timegm(time.gmtime())) )
                            print_message = "Mac:%s, Last Visited:%s, Time Spent:%s minutes \n"%(x, time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(trigger_input['last_seen'][trigger_input['mac']==x]+ 8*60*60 )) , (trigger_input['time_spent'][trigger_input['mac']==x]))
                            print(print_message)
                            time.sleep(1)
                            namewithNumber = findPhone(x)
                            if namewithNumber is None:
                                whatsapp_message = './yowsup-cli demos -c file.config -M -s 6584983348 "%s"' % print_message
                            else:
                                whatsapp_message = './yowsup-cli demos -c file.config -M -s 65%s "%s, have you registered for Balik Kampung?"' % namewithNumber
                            subprocess.call(whatsapp_message, shell=True)

                    if len(sent_already)==0:
                        ascii.write([outbound_macs, outbound_time], names=['mac','sent_time'], output='./sent_list.dat')
                    else:
                        ascii.write([np.append(sent_already['mac'], outbound_macs), np.append(sent_already['sent_time'],outbound_time)], names=['mac','sent_time'], output='./sent_list.dat')
Example #19
def test_csv_ecsv_colnames_mismatch():
    """
    Test that mismatch in column names from normal CSV header vs.
    ECSV YAML header raises the expected exception.
    """
    lines = copy.copy(SIMPLE_LINES)
    header_index = lines.index('a b c')
    lines[header_index] = 'a b d'
    with pytest.raises(ValueError) as err:
        ascii.read(lines, format='ecsv')
    assert "column names from ECSV header ['a', 'b', 'c']" in str(err)
Example #20
    def __init__(self):
        r = ascii.read(os.path.join(constants.DESTINATION, 'calibration',
                                    'calibration-r.csv'))
        i = ascii.read(os.path.join(constants.DESTINATION, 'calibration',
                                    'calibration-i.csv'))
        ha = ascii.read(os.path.join(constants.DESTINATION, 'calibration',
                                     'calibration-ha.csv'))
        self.shifts = dict(zip(
            np.concatenate((r['run'], i['run'], ha['run'])),
            np.concatenate((r['shift'], i['shift'], ha['shift']))))
Example #21
    def _parse_result(self, response, verbose=False):
        """
        Parse a response into an astropy Table
        """

        try:
            result = ascii.read(response.content.split("\n"), delimiter=":", format="basic")
        except TypeError:
            # deprecated
            result = ascii.read(response.content.split("\n"), delimiter=":", Reader=ascii.Basic)

        return result
Example #22
def get_observational_data():
    """ Read files with observational data. """
    obs = ascii.read(os.path.join(tables_dir, "spec_radec.dat"))
    obs = [obs["Spec"], obs["Group"], obs["RA"], obs["DEC"], obs["RADIUS(deg)"], obs["Mask"]]
    candidates = ascii.read(os.path.join(tables_dir, "candidates_radec.dat"))
    cand_groups = np.array([x.split("_")[0] for x in candidates["specs"]])
    cand_ra = coord.Angle(candidates["RA"], unit=units.hour)
    cand_ra = cand_ra.degree
    cand_dec = coord.Angle(candidates["DEC"], unit=units.degree)
    cand_dec = cand_dec.degree
    cand = [candidates["specs"], cand_groups, cand_ra, cand_dec]
    return obs, cand
Example #23
def make_plot2(id = 123, field = 'aegis'):
	os.chdir("/Volumes/TOSHIBA EXT/3d_hst/noah/%s" % (field))
	
	data1 = ascii.read("%s..obs_sed" % (id))
	data2 = ascii.read("%s..temp_sed" % (id))
	lam1 = data1["lambda"]
	flux_1 = data1["flux_cat"]
	lam2 = data2["lambda"]
	flux_2 = data2["tempflux"]
	
	factor = 3.0*(10.0**5.56)
	flux1 = (flux_1*(lam1**-2.0))*factor
	flux2 = (flux_2*(lam2**-2.0))*factor
	
	error_ = data1["err_full"]
	error = (error_*(lam1**-2.0))*factor
	

	data = ascii.read("%s..pz" % (id))
	z = data["z"]
	pz = data["pz"]
	
	field = field.upper()
	id = str(id)
	
	os.chdir("/Volumes/TOSHIBA EXT/3d_hst/%s-RGB_v4.0" % (field))
	
	fig, axes = pylab.subplots(1, 2)

	a1 = axes[0]
	a2 = axes[1]
	
	a1.plot(lam2, flux2)
	a1.scatter(lam1, flux1)
	a1.errorbar(lam1, flux1, yerr=error, linestyle="none")
	a1.set_xlim([2000,140000])
	a1.set_ylim(ymin=0)
	a1.set_xscale("log")
	a1.set_xlabel("Wavelength")
	a1.set_ylabel("Flux")
	
	a2.plot(z, pz)
	a2.set_xlabel("z")
	a2.set_ylabel("pz")
	a2.set_yscale("log")
	a2.set_ylim([0.01,10])
	a2.set_title("P of Z")
	
	pylab.suptitle("id %s of field %s" % (id, field), fontsize=19)
	pylab.text(0, -110, "Wavelength vs Flux", fontsize=16)
	
	os.chdir("/Volumes/TOSHIBA EXT/3d_hst")
Example #24
    def getFilter(self, filterName):
        if filterName == 'rFilter':
            Filter = ascii.read('/Users/natsomme/Dropbox/Uni/Thesis/fake_data_code/DES_asahi_r.txt')
        elif filterName == 'iFilter':
            Filter = ascii.read('/Users/natsomme/Dropbox/Uni/Thesis/fake_data_code/DES_asahi_i.txt')
        elif filterName == 'gFilter':
            Filter = ascii.read('/Users/natsomme/Dropbox/Uni/Thesis/fake_data_code/DES_asahi_g.txt')
        else:
            print('Give a valid filter: "rFilter", "iFilter" or "gFilter".')
            quit()
        # End of if-statement

        return Filter
Example #25
def plot_single_slit_locations(slit_num, ax_stis, cenwave, color = 'y'):
    '''
    Read in a single slit file and replot in a different color
    '''
    print('slit = ', slit_num)
    if slit_num < 10:
        tbdata = ascii.read('slit0{}_{}_phot.dat'.format(int(slit_num), cenwave))
    else:
        tbdata = ascii.read('slit{}_{}_phot.dat'.format(int(slit_num), cenwave))
    ax_stis.plot(np.ones(tbdata['y'].shape)*((slit_num -1)*4 + 2), tbdata['y'].data - 1, 
                        color = color, marker = 'o', linestyle = 'none', markersize = 10)
    pyplot.draw()
    return ax_stis
Example #26
def test_ecsv_but_no_yaml_warning():
    """
    Test that trying to read an ECSV without PyYAML installed when guessing
    emits a warning, but reading with guess=False gives an exception.
    """
    with catch_warnings() as w:
        ascii.read(SIMPLE_LINES)
    assert len(w) == 1
    assert "file looks like ECSV format but PyYAML is not installed" in str(w[0].message)

    with pytest.raises(ascii.InconsistentTableError) as exc:
        ascii.read(SIMPLE_LINES, format='ecsv')
    assert "PyYAML package is required" in str(exc)
Example #27
def plot(filters):
    for f in filters:
         fig=plt.figure()
         file_plot = ascii.read('photometry_'+str(sn_name)+str(f)+'.txt')
         file2_plot = ascii.read('star_'+str(sn_name)+str(f)+'.txt')
         plt.figure(str(f))
         plt.errorbar(file_plot["Time"],file_plot["Count"],file_plot["Error"],file_plot["Error"]*0,fmt='ko')
         plt.errorbar(file2_plot["Time"],file2_plot["Count"],file2_plot["Error"],file2_plot["Error"]*0,fmt='o')
         plt.title(str(sn_name)+str(f))
         ax = fig.add_subplot(111)
         ax.xaxis.set_major_formatter(ScalarFormatter())  # needs: from matplotlib.ticker import ScalarFormatter
         plt.savefig(str(sn_name)+str(f))
         plt.show()
Example #28
	def __init__(self, ob_name, object_name=""):
		self.ob_name = ob_name
		self.object_name = object_name

		if self.object_name == "":
			if self.ob_name[:-2] == "ESO137":
				self.object_name = "ESO137-G034"
			else:
				self.object_name = ob_name[:-2]

		##
		## TODO: need some error handling here!
		n=Ned.query_object(self.object_name)
		self.z=n['Redshift'].data.data[0]

		##
		## set some directories and files
		self.f_combined = os.getenv("XDIR")+'/combined/'+self.ob_name+'.fits'
		self.dir_starlight = os.getenv("HOME") + "/STARLIGHT"
		self.f_starlight_bc03 = self.dir_starlight + "/spectra/" + self.ob_name + ".txt"
		self.cfg_SL_infile=self.dir_starlight+"/infiles/"+self.ob_name+".in"
		self.dataset_definition = os.getenv("XDIR")+'/dataset_definition/'+self.ob_name+'.txt'
		d=ascii.read(self.dataset_definition)
		self.night=d['night'][0]
		ob_name_list=[d['object'][0],d['telluric'][0],d['flux'][0]]
		self.arms=["NIR","VIS","UVB"]
		self.dprlist=["SCI","TELL","FLUX"]
	
		self.dataset = Table(names=('dprtype', 'ob_name', 'arm', 'dpid'), dtype=(np.dtype((str,10)), np.dtype((str,20)), np.dtype((str,3)), np.dtype((str,29))), meta={'night': self.night})

		conn=sqlite3.connect(os.getenv("OBSDB"))
		c=conn.cursor()
	
		for arm in self.arms:
			f_arm=self.dataset_definition.split('.txt')[0]+'_'+arm+'.txt'
			if os.path.isfile(f_arm):
				d_arm=ascii.read(f_arm,data_start=0,names=["ob","dpid"])
				for ob,dpid,dpr in zip(d_arm["ob"],d_arm["dpid"],self.dprlist):
					self.dataset.add_row([dpr,ob,arm,dpid])
			else:
				with open(f_arm,'w') as f:
					for ob,dpr in zip(ob_name_list,self.dprlist):
						query = "select arcfile from shoot where night=\"" + self.night + \
							"\" and ob_name=\""+ ob + \
							"\" and arm=\"" + arm + \
							"\" and opti2_name=\"IFU\" limit 1;"
						c.execute(query)
						dpid=c.fetchone()
						self.dataset.add_row([dpr,ob,arm,dpid])
						file_string=ob+" "+dpid[0]+"\n"
						f.write(file_string)
Example #29
def load_kowalski08():
    """
    Nearby 99 set from Kowalski et al 2008
    http://adsabs.harvard.edu/abs/2008ApJ...686..749K
    """
    
    readme = download_file(CDS_PREFIX + "J/ApJ/686/749/ReadMe", "kowalski08")
    table1 = download_file(CDS_PREFIX + "J/ApJ/686/749/table1.dat",
                           "kowalski08")
    table10 = download_file(CDS_PREFIX + "J/ApJ/686/749/table10.dat",
                            "kowalski08")

    # Parse SN coordinates and redshifts
    meta = ascii.read(table1, format='cds', readme=readme)
    ra = hms_to_deg(meta['RAh'], meta['RAm'], meta['RAs'])
    dec = sdms_to_deg(meta['DE-'], meta['DEd'], meta['DEm'], meta['DEs'])

    data = ascii.read(table10, format='cds', readme=readme)
    data = data.filled(0.)  # convert from masked table

    data = pivot_table(data, 'band', ['{}mag', 'e_{}mag'],
                       ['B', 'V', 'R', 'I'])
    data = data[data['mag'] != 0.]  # eliminate missing values

    # Join telescope and band into one column
    data['band'] = np.char.add(np.char.replace(data['Tel'], ' ', '_'),
                               np.char.add('_', data['band']))
    del data['Tel']

    # Split up table into one table per SN and add metadata.
    sne = OrderedDict()
    for i in range(len(meta)):
        name = meta['SN'][i]
        sndata = data[data['SN'] == name]
        snmeta = OrderedDict([('name', name),
                              ('dataset', 'kowalski08'),
                              ('z_helio', meta['z'][i]),
                              ('ra', ra[i]),
                              ('dec', dec[i])])
        zp = 29. * np.ones(len(sndata), dtype=np.float64)
        zpsys = len(sndata) * ['vega']
        flux, fluxerr = mag_to_flux(sndata['mag'], sndata['e_mag'], zp)
        sne[name] = Table([jd_to_mjd(sndata['JD']), sndata['band'],
                           flux, fluxerr, zp, zpsys],
                          names=('time', 'band', 'flux', 'fluxerr', 'zp',
                                 'zpsys'),
                          meta=snmeta)
        # TODO: correct descriptions on table columns.

    return sne
Example #30
def run():
    import astropy.io.ascii as ascii
    try:
        a = ascii.read('test.dat')
    except Exception:
        generate_test_data()
        a = ascii.read('test.dat')

    lm = linmix.LinMix(a['x'], a['y'], a['xsig'], a['ysig'], delta=a['delta'])
    lm.run_mcmc()
    ascii.write(lm.chain[['alpha', 'beta', 'sigsqr',
                          'mu0', 'usqr', 'wsqr',
                          'ximean', 'xisig', 'corr']],
                'test.pyout')
Example #31
import os

import astropy.units as u
from astropy.io import ascii
from astropy.time import Time
from astropy.table import Table, Column, join
import numpy as np

# Project
from streams.coordinates import sex_to_dec
from streams.observation.time import gmst_to_utc, lmst_to_gmst
from streams.observation.rrlyrae import time_to_phase, phase_to_time
from streams.util import project_root

data_file = os.path.join(project_root, "data", "catalog", "TriAnd_RRLyr.txt")
stars = ascii.read(data_file,
                   converters={'objectID': [ascii.convert_numpy(str)]},
                   header_start=0,
                   data_start=1,
                   delimiter=" ")

# Need to wrap so RA's go 22,23,24,0,1,etc.
ras = np.array(stars['ra'])
ras[ras > 90.] = ras[ras > 90.] - 360.
idx = np.argsort(ras)
stars = stars[idx]

names = ["TriAndRRL{0}".format(ii + 1) for ii in range(len(stars))]
stars.add_column(Column(names, name='name'))

# Read in RR Lyrae standards
RRLyr_stds1 = ascii.read(
    "/Users/adrian/Documents/GraduateSchool/Observing/Std RR Lyrae/nemec_RRLyrae.txt"
Example #32
from astropy.io import ascii
import matplotlib.patches as mpatches
import math
import numpy as np

input_starnum = 50000
nbins = 10000

#plt.clf()
#plt.cla()

#os.environ['ISOCHRONE_DIR'] = '/home/holtz/analysis/apogee/dist/isochrones/'

# Pull columns from .dat files:
data = ascii.read('zp01.dat',
                  names=[
                      'Z', 'Log_Age', 'M_initial', 'M_actual', 'Log_L',
                      'Log_Teff', 'Log_G', 'm_bol', 'U', 'B', 'V', 'R', 'I',
                      'J', 'H', 'K', 'int_IMF', 'Stage'
                  ])

agestruct = data[np.where(data['Log_Age'] == 9)]

x = agestruct['Log_Teff']
y = agestruct['Log_L']

xlen = int(np.sqrt(nbins))
ylen = xlen

dx = (max(x) - min(x)) / xlen
dy = (max(y) - min(y)) / ylen

newx = []
Example #33
file_sourcelist_name = file_sourcelist_coord_px.split('.', 2)[0]
print(file_sourcelist_name)

file_reg_phy = file_sourcelist_name + '_phy.reg'
print('will write to : ' + file_reg_phy)
if os.path.exists(file_reg_phy):
    os.remove(file_reg_phy)
f_reg = open(file_reg_phy, 'w')

f_reg.write('# Region file format: DS9 version 4.1\n')
f_reg.write(
    'global color=green dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n'
)
f_reg.write('physical\n')
#sys.exit(0)
array_sourcelist_coord_px = ascii.read(file_sourcelist_coord_px)
print(array_sourcelist_coord_px)
n_line = len(array_sourcelist_coord_px)
#print(n_line)

xydxy = np.zeros((3, n_line))

for i in range(n_line):
    x = array_sourcelist_coord_px[i][0]
    y = array_sourcelist_coord_px[i][1]
    xydxy[0][i] = x
    xydxy[1][i] = y
    dx = abs(x - 1024.)
    dy = abs(y - 1024.)
    dxy = np.sqrt(dx**2 + dy**2)
    xydxy[2][i] = dxy
Example #34
def make_validation_table(fitspath: str, vmin_4363SN=3, vmin_5007SN=100,
                          vmax_4363sig=1.6, rlmin_4363SN=3,
                          rlmax_4363sig=1.6, rlmin_5007SN=100):
    """
    This function creates a validation table for a given binning set.
    The validation table contains a OIII4363 detection column where 1.0
    means detection, 0.5 means non-detection with reliable OIII5007, and
    0.0 means unreliable non-detection. This function will be run every
    time the analysis is completed and will create a validation table
    for every analysis.

    Usage:
        valid_table.make_validation_table(fitspath, bin_type_str)

    :param fitspath: Full file path where the input file is and where the
                     output file will be placed.
    :param vmin_4363SN: int. minimum OIII4363 S/N for valid detection
    :param vmin_5007SN: int. minimum OIII5007 S/N for valid detection
    :param vmax_4363sig: float. maximum OIII4363 sigma for valid detection
    :param rlmin_4363SN: int. minimum OIII4363 S/N for robust limit
    :param rlmax_4363sig: float. maximum OIII4363 sigma for robust limit
    :param rlmin_5007SN: int. minimum OIII5007 S/N for robust limit

    Outputs:
      fitspath + 'bin_validation.tbl'
        Validation table containing bin IDs; number of galaxies in each bin;
        and a column indicating OIII4363 detection/non-detection,
        OIII4363_Flux_Observed, OIII4363_S/N
    """

    bin_table = asc.read(fitspath + filename_dict['bin_info'])
    em_table = asc.read(fitspath + filename_dict['bin_fit']) 

    bin_ID = em_table['bin_ID'].data
    raw_OIII4363 = em_table['OIII_4363_Flux_Observed'].data
    O_4363_SN = em_table['OIII_4363_S/N'].data
    O_4363_sigma = em_table['OIII_4363_Sigma'].data
    O_5007_SN = em_table['OIII_5007_S/N'].data
    
    N_stack = bin_table['N_stack'].data
    Hgamma_SN = em_table['HGAMMA_S/N'].data
    Hgamma = em_table['HGAMMA_Flux_Observed'].data

    detection  = np.zeros(len(bin_ID))
    OIII4363 = np.zeros(len(bin_ID))
    up_limit = (Hgamma/Hgamma_SN) * 3

    valid_stacks_idx = np.where((O_4363_SN >= vmin_4363SN) &
                                (O_5007_SN > vmin_5007SN) &
                                (O_4363_sigma < vmax_4363sig))[0]
    reliable_5007_stacks = np.where((O_4363_sigma < rlmax_4363sig) &
                                    (O_5007_SN > rlmin_5007SN))[0]
    wide_line_valid = np.where((O_4363_SN >= rlmin_4363SN) &
                               (O_5007_SN > rlmin_5007SN) &
                               (O_4363_sigma >= rlmax_4363sig))[0]
    detection[reliable_5007_stacks] = 0.5
    detection[wide_line_valid] = 0.5
    detection[valid_stacks_idx] = 1
    print(detection)

    for ii in range(len(OIII4363)):
        if detection[ii] == 1:
            OIII4363[ii] = raw_OIII4363[ii]
        if detection[ii] == 0.5:
            OIII4363[ii] = up_limit[ii]
        if detection[ii] == 0:
            OIII4363[ii] = up_limit[ii]

    ver_tab = fitspath + filename_dict['bin_valid']
    tab1 = Table([bin_ID, N_stack, detection, OIII4363, O_4363_SN],
                 names=valid_table_names0)
    asc.write(tab1, ver_tab, format='fixed_width_two_line')
Example #35
def test_docs_example():
    # Test the example in astroplan/docs/tutorials/constraints.rst
    target_table_string = """# name ra_degrees dec_degrees
    Polaris 37.95456067 89.26410897
    Vega 279.234734787 38.783688956
    Albireo 292.68033548 27.959680072
    Algol 47.042218553 40.955646675
    Rigel 78.634467067 -8.201638365
    Regulus 152.092962438 11.967208776"""

    from astroplan import Observer, FixedTarget
    from astropy.time import Time
    subaru = Observer.at_site("Subaru")
    time_range = Time(["2015-08-01 06:00", "2015-08-01 12:00"])

    # Read in the table of targets
    from astropy.io import ascii
    target_table = ascii.read(target_table_string)

    # Create astroplan.FixedTarget objects for each one in the table
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    targets = [
        FixedTarget(coord=SkyCoord(ra=ra * u.deg, dec=dec * u.deg), name=name)
        for name, ra, dec in target_table
    ]

    from astroplan import Constraint, is_observable

    class VegaSeparationConstraint(Constraint):
        """
        Constrain the separation from Vega
        """
        def __init__(self, min=None, max=None):
            """
            min : `~astropy.units.Quantity` or `None` (optional)
                Minimum acceptable separation between Vega and target. `None`
                indicates no limit.
            max : `~astropy.units.Quantity` or `None` (optional)
                Maximum acceptable separation between Vega and target. `None`
                indicates no limit.
            """
            self.min = min if min else 0 * u.deg
            self.max = max if max else 180 * u.deg

        def compute_constraint(self, times, observer, targets):
            vega = SkyCoord(ra=279.23473479 * u.deg, dec=38.78368896 * u.deg)

            # Calculate separation between target and vega
            # Targets are automatically converted to SkyCoord objects
            # by __call__ before compute_constraint is called.
            vega_separation = vega.separation(targets)

            # Return an array that is True where the target is observable and
            # False where it is not
            return (self.min < vega_separation) & (vega_separation < self.max)

    constraints = [VegaSeparationConstraint(min=5 * u.deg, max=30 * u.deg)]
    observability = is_observable(constraints,
                                  subaru,
                                  targets,
                                  time_range=time_range)

    assert all(observability == [False, False, True, False, False, False])
Example #36
def DoAll():
    data = [ascii.read(filters[k]+'_stips.txt',format='ipac')\
            for k in range(len(filters))]
    xy = read_ascii_col(filename, 'xy', 2)
    vega_mags,mag_errors = [read_ascii_col(filename,\
                            k,len(filters)) for k in ['mags','errs']]
    SNR,Crowd,Round,Sharp,Sky= [read_ascii_col(filename,\
                            k,len(filters)) for k in ['snr','crowd','round','sharp','sky']]

    all_in = lambda a, b: input_sources(data, a, b)
    all_out = lambda a,b: output_sources(xy,vega_mags,mag_errors,\
                                         SNR,Crowd,Round,Sharp,Sky,a,b)
    t1, t2 = all_in(0, 3), all_out(0, 3)  # only Z and H

    c1_in, c2_in, X, Y, typ_in = t1['c1_in'], t1['c2_in'], t1['X'], t1[
        'Y'], t1['typ_in']
    err, rnd, shr, crd, snr, sky = t2['err'], t2['rnd'], t2['shr'], t2[
        'crd'], t2['snr'], t2['sky']
    x, y, m1, m2 = t2['xy'][0], t2['xy'][1], t2['mag'][0], t2['mag'][1]

    tt = typ_in == 'point'
    c1_in, c2_in, X, Y, typ_in = c1_in[tt], c2_in[tt], X[tt], Y[tt], typ_in[tt]
    del tt

    tt = (err[0]<0.3)&(err[1]<0.3)&\
          (snr[0]>1)&(snr[0]<500)&(snr[1]>1)&(snr[1]<500)&\
           (shr[0]>-0.2)&(shr[0]<0.2)&(shr[1]>-0.4)&(shr[1]<0.4)&\
            (rnd[0]>-10)&(rnd[0]<15)&(rnd[1]>-10)&(rnd[1]<15)&\
             (crd[0]<0.1)&(crd[1]<0.4)
    x,y,m1,m2,snr1,snr2,sky1,sky2 = x[tt],y[tt],\
        m1[tt],m2[tt],snr[0][tt],snr[1][tt],sky[0][tt],sky[1][tt]
    del tt

    in1, typ_out = match_in_out(tol, X, Y, x, y, typ_in)

    tt = typ_out == 'point'
    x, y, snr1, snr2, sky1, sky2, typ_out = x[tt], y[tt], snr1[tt], snr2[
        tt], sky1[tt], sky2[tt], typ_out[tt]
    del tt

    tt = in1 != -1
    in1, X, Y, c1_in, c2_in, typ_in = in1[tt], X[tt], Y[tt], c1_in[tt], c2_in[
        tt], typ_in[tt]
    del tt

    in1, temp = match_in_out(tol, X, Y, x, y, typ_in)

    snr_out1 = [snr1[i] for i in in1]
    snr_out2 = [snr2[i] for i in in1]
    sky_out1 = [sky1[i] for i in in1]
    sky_out2 = [sky2[i] for i in in1]

    plot_xy(c1_in,
            snr_out1,
            xlabel='Count Rate (input)',
            ylabel='SNR (recovered)',
            title='WFI Z087, 10x10',
            xlim1=0,
            xlim2=10,
            ylim1=0,
            ylim2=310,
            outfile='Z087_10_10',
            fmt='png',
            n=4)
    plot_xy(c2_in,
            snr_out2,
            xlabel='Count Rate (input)',
            ylabel='SNR (recovered)',
            title='WFI H158, 10x10',
            xlim1=0,
            xlim2=10,
            ylim1=0,
            ylim2=110,
            outfile='H158_10_10',
            fmt='png',
            n=4)

    tab = [c1_in, snr_out1, sky_out1, c2_in, snr_out2, sky_out2]
    nms = ('Z087_Countrate', 'Z087_SNR', 'Z087_Sky', 'H158_Countrate',
           'H158_SNR', 'H158_Sky')
    fmt = {
        'Z087_Countrate': '%12.8f',
        'Z087_SNR': '%10.5f',
        'Z087_Sky': '%10.5f',
        'H158_Countrate': '%12.8f',
        'H158_SNR': '%10.5f',
        'H158_Sky': '%10.5f'
    }
    t = Table(tab, names=nms)
    ascii.write(t,
                'SNR_Count_10_10.txt',
                format='fixed_width',
                delimiter=' ',
                formats=fmt)

    return None
Example #37
def dofit(datfile='HST_CANDELS4_bush.sncosmo.dat',
          z=1.76,
          dz=0.53,
          t0=55803.1,
          dt0=25.0,
          x1=None,
          c=None,
          model='Ia',
          noUV=True,
          debug=False):

    # TODO : read in the redshift, etc from the header.

    # read in the obs data
    sn = ascii.read(datfile,
                    format='commented_header',
                    header_start=-1,
                    data_start=0)

    if model == 'Ia':
        # define SALT2 models and set initial guesses for z and t0
        if noUV:
            salt2ex = sncosmo.Model(source='salt2')
        else:
            salt2ex = sncosmo.Model(source='salt2-extended')
        salt2ex.source.set_peakmag(0., 'bessellb', 'ab')
        x0_AB0 = salt2ex.get('x0')
        salt2ex.set(z=z, t0=t0, x1=0.1, c=-0.2)
        # salt2ex.set( z=1.33, t0=56814.6, hostebv=0.05, hostr_v=3.1 )

        # Do a bounded fit :
        # salt2res, salt2fit = sncosmo.fit_lc( sn, salt2, ['z','t0','x0','x1','c'], bounds={'z':(1.28,1.37),'t0':(56804,56824)} )
        varlist = ['z', 't0', 'x0']
        bounds = {'z': (z - dz, z + dz), 't0': (t0 - dt0, t0 + dt0)}
        if x1 is not None:
            salt2ex.set(x1=x1)
            bounds['x1'] = (x1 - 1e-6, x1 + 1e-6)
            varlist.append('x1')
        else:
            bounds['x1'] = (-5, 5)
            varlist.append('x1')
        if c is not None:
            salt2ex.set(c=c)
        else:
            bounds['c'] = (-0.5, 3.0)
            varlist.append('c')

        res, fit = sncosmo.fit_lc(sn, salt2ex, varlist, bounds)

        x0 = fit.get('x0')
        z = fit.get('z')
        mB = -2.5 * np.log10(x0 / x0_AB0)
        distmod = mB - MBmodel
        deltamuLCDM = distmod - dm(z)
        print("mB = %.2f" % mB)
        print("dist.mod. = %.2f" % distmod)
        print("Delta.mu_LCDM = %.2f" % deltamuLCDM)

        chi2 = res.chisq
        ndof = res.ndof
        pval = chisqprob(chi2, ndof)

        if ndof > 0:
            print("chi2/dof= %.3f" % (chi2 / float(ndof)))
            print("p-value = %.3f" % pval)
        else:
            print("chi2/dof= %.3f/%i" % (chi2, ndof))
            print("p-value = %.3f" % pval)

        print("z = %.3f" % fit.get('z'))
        print("t0 = %.3f" % fit.get('t0'))
        print("x0 = %.3e" % fit.get('x0'))
        print("x1 = %.3f" % fit.get('x1'))
        print("c = %.3f" % fit.get('c'))

    elif model.lower() in ['cc', 'ib', 'ic', 'ii', 'ibc', 'iip', 'iin']:
        # remove the blue filters from the sn data
        bandlist = sn['filter'].data
        igood = np.array([band.startswith('f1') for band in bandlist])
        sn = sn.copy()[igood]

        # define a host-galaxy dust model
        dust = sncosmo.CCM89Dust()
        version = '1.0'

        if model.lower() == 'cc': classlist = ['Ib', 'Ic', 'IIP', 'IIn']
        elif model.lower() == 'ii': classlist = ['IIP', 'IIn']
        elif model.lower() == 'ibc': classlist = ['Ib', 'Ic']
        else: classlist = [model]

        # find the best-fit from each CC sub-class
        chi2list, reslist, fitlist = [], [], []
        for snclass in classlist:
            for tempnum in range(1, 10):
                Av = 0.2
                modname = snclass.lower() + '.%02i' % tempnum
                modkey = (sncosmo.Source, modname, version)
                if modkey not in sncosmo.registry._loaders: continue
                ccmodel = sncosmo.Model(source=modname,
                                        effects=[dust],
                                        effect_names=['host'],
                                        effect_frames=['rest'])
                ccmodel.set(z=z, t0=t0, hostr_v=3.1, hostebv=Av / 3.1)
                # Do a bounded fit :
                res, fit = sncosmo.fit_lc(sn,
                                          ccmodel,
                                          ['z', 't0', 'amplitude', 'hostebv'],
                                          debug=debug,
                                          bounds={
                                              'z': (z - dz, z + dz),
                                              't0': (t0 - dt0, t0 + dt0),
                                              'hostebv': (0.0, 1.0)
                                          })

                chi2 = res.chisq
                ndof = res.ndof
                pval = chisqprob(chi2, ndof)

                print("%s  chi2/dof= %.3f  p=%.3f" %
                      (modname, chi2 / float(ndof), pval))
                chi2list.append(chi2 / float(ndof))
                reslist.append(res)
                fitlist.append(fit)
        ichi2min = np.argmin(chi2list)
        res, fit = reslist[ichi2min], fitlist[ichi2min]
    else:  # 'nugent-sn91bg'
        # remove the blue filters from the sn data
        bandlist = sn['filter'].data
        igood = np.array([band.startswith('f1') for band in bandlist])
        sn = sn.copy()[igood]

        # define a host-galaxy dust model
        dust = sncosmo.CCM89Dust()
        version = '1.0'

        Av = 0.2
        altmodel = sncosmo.Model(source=model,
                                 effects=[dust],
                                 effect_names=['host'],
                                 effect_frames=['rest'])
        altmodel.set(z=z, t0=t0, hostr_v=3.1, hostebv=Av / 3.1)
        # Do a bounded fit :
        res, fit = sncosmo.fit_lc(sn,
                                  altmodel,
                                  ['z', 't0', 'amplitude', 'hostebv'],
                                  debug=debug,
                                  bounds={
                                      'z': (z - dz, z + dz),
                                      't0': (t0 - dt0, t0 + dt0),
                                      'hostebv': (0.0, 1.0)
                                  })

        chi2 = res.chisq
        ndof = res.ndof
        pval = chisqprob(chi2, ndof)

        print("%s  chi2/dof= %.3f  p=%.3f" % (model, chi2 / float(ndof), pval))

    return (sn, fit, res)
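
# a hedged follow-up sketch: sncosmo can overplot the best-fit model on the
# returned data, e.g.
#     sncosmo.plot_lc(sn, model=fit, errors=res.errors)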
Exemple #38
0
from astropy.io import ascii
from astropy.table import Column
import numpy as np
import matplotlib.pyplot as plt

# Determines whether I sample multiple thresholds and create 'samplethreshold.dat', or use just a single threshold and create 'threshold.dat'
samplethresholds = True
filetype = '80weighted'
inputhistogramfile = filetype + 'histogramofpoints0.5comp.dat'  # Input all the time
outputthresholdfile = filetype + 'threshold.dat'  # Output when samplethresholds=False
sampleoutputthresholdfile = filetype + 'samplethreshold0.5comp.dat'  #Output when samplethresholds=True
setthresholdup = 0.3
setthresholddown = setthresholdup - 0.02

# Reading in the synthetics and making new columns
allsynthetics = ascii.read('allsynthetics.dat', header_start=0, data_start=1)
allsynthetics = allsynthetics[np.where(allsynthetics['synthetic_id'] > 1412)]
colsynxmid = Column(['-9999.99999999' for x in range(len(allsynthetics))],
                    name='synxmid')
colsynduration = Column(['-0.99999999' for x in range(len(allsynthetics))],
                        name='synduration')
colsynoverlap = Column(['-0.99999999' for x in range(len(allsynthetics))],
                       name='synoverlap')
coluseroverlap = Column(['-0.99999999' for x in range(len(allsynthetics))],
                        name='useroverlap')
coluserxmin = Column(['-9999.99999999' for x in range(len(allsynthetics))],
                     name='userxmin')
coluserxmax = Column(['-9999.99999999' for x in range(len(allsynthetics))],
                     name='userxmax')
coluserxmid = Column(['-0.99999999' for x in range(len(allsynthetics))],
                     name='userxmid')
Exemple #39
0
#Folder to save the figures
figout = '/Users/galaxies-air/COSMOS/Images/'

#The location with the file for all of our data
fluxdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/lineflux_red.txt'

#Location of the equivalent width data
#ewdata = '/Users/galaxies-air/COSMOS/COSMOSData/lineew.txt'
#Read in the ew of the lines
#ew_df = ascii.read(ewdata).to_pandas()

#The location to store the scale and its stddev of each line
qualdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/dataqual.txt'
#Read in the scale of the lines
dataqual = ascii.read(qualdatapath).to_pandas()
d = {'True': True, 'False': False}
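# a hedged sketch of how the mapping above is presumably used: pandas can
# turn the 'True'/'False' strings read from the table into real booleans,
# e.g. dataqual = dataqual.replace(d)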

#File with the error array
errdatapath = '/Users/galaxies-air/COSMOS/COSMOSData/errs.txt'
#Read in the error array
err_df = ascii.read(errdatapath, data_start=1, header_start=0,
                    format='csv').to_pandas()

#File with the error array
errreddatapath = '/Users/galaxies-air/COSMOS/COSMOSData/errs_red.txt'
#Read in the error array for the reddened data
err_dfred = ascii.read(errreddatapath,
                       data_start=1,
                       header_start=0,
                       format='csv').to_pandas()
Exemple #40
0
def main():
    #myfuncs = ['constant', 'polynomial1', 'ackbar', 'eclipse', 'sine2'] 
    myfuncs = ['constant', 'polynomial1', 'model_ramp', 'eclipse', 'sine2'] 

    #significance above which to mask outliers
    #outlier_cut = 10.

    #parses command line input
    try: opts, args = \
            getopt.getopt(sys.argv[1:], 
                "hov", ["help", "show-plot", "run-mcmc", "plot-raw-data", 
                "plot-sys", "path=", "fit-white=", "divide-white"]
            )
    except getopt.GetoptError: usage()

    #defaults for command line flags
    verbose         = False
    output          = False
    show_plot       = False
    run_mcmc        = False
    run_lsq         = True
    plot_raw_data   = False
    path            = "spec_lc"
    fit_white       = False
    divide_white    = False

    for o, a in opts:
        if o in ("-h", "--help"): usage()
        elif o == "-o": output = True
        elif o == "-v": verbose = True
        elif o == "--show-plot": show_plot = True
        elif o == "--run-mcmc": run_mcmc, run_lsq = True, False
        elif o == "--run-lsq": run_lsq = True
        elif o == "--plot-raw-data": plot_raw_data = True
        elif o == "--path": path = a
        elif o == "--fit-white": fit_white, white_file = True, a
        elif o == "--divide-white": divide_white = True
        else: assert False, "unhandled option"

    flags = {'verbose': verbose, 'show-plot': show_plot, 
            'plot-raw-data': plot_raw_data, 'output': output, 
            'out-name': 'none.txt', 'run-lsq': run_lsq, 
            'run-mcmc': run_mcmc, 'divide-white': divide_white, 
            'fit-white': fit_white}		

    #reads in observation and fit parameters
    obs_par = {x['parameter']: x['value'] for x in 
                ascii.read("config/obs_par.txt", Reader=ascii.CommentedHeader)
              }
    fit_par =   ascii.read("config/fit_par.txt", Reader=ascii.CommentedHeader)

    files = glob.glob(os.path.join(path, "*"))		
    if fit_white: files = glob.glob(white_file)

    flags['out-name'] = "fit_" + pythontime.strftime("%Y_%m_%d_%H:%M") + ".txt"

    for f in files:
        data = Data(f, obs_par, fit_par)
        model = Model(data, myfuncs)
        data, model, params = lsq_fit(fit_par, data, flags, model, myfuncs)

        """ind = model.resid/data.err > 10.
        print "num outliers", sum(ind)
        data.err[ind] = 1e12
        data, model = lsq_fit(fit_par, data, flags, model, myfuncs)"""
    
        ##rescale error bars so reduced chi-squared is one
        """data.err *= np.sqrt(model.chi2red)                                      
        data, model, params = lsq_fit(fit_par, data, flags, model, myfuncs)"""
        if flags['verbose']: print("rms, chi2red =", model.rms, model.chi2red)
        

        #FIXME : make this automatic!
        """outfile = open("white_systematics.txt", "w")
        for i in range(len(model.all_sys)): print>>outfile, model.all_sys[i]
        outfile.close()"""
                            
        if flags['run-mcmc']:
            output = mcmc_fit(data, model, params, f, obs_par, fit_par)     
Exemple #41
0
import os

from astropy.io import ascii
from astropy.table import QTable, Table
import astropy.units as u

try:
    from .version import __version__
except ImportError:
    __version__ = "unknown"
__all__ = []
# roentgen specific configuration
# load some data files on import

_package_directory = os.path.dirname(os.path.abspath(__file__))
_data_directory = os.path.abspath(os.path.join(_package_directory, 'data'))

elements_file = os.path.join(_data_directory, 'elements.csv')
elements = QTable(ascii.read(elements_file, format='csv'))

elements['density'].unit = u.g / (u.cm**3)
elements['i'].unit = u.eV
elements['ionization energy'].unit = u.eV
elements['atomic mass'] = elements['z'] / elements['zovera'] * u.u
elements.add_index('z')

compounds_file = os.path.join(_data_directory, 'compounds_mixtures.csv')
compounds = QTable(ascii.read(compounds_file, format='csv', fast_reader=False))
compounds['density'].unit = u.g / (u.cm**3)
compounds.add_index('symbol')
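# with the indices added above, rows can be fetched directly through astropy's
# .loc interface -- a hedged sketch (assumes z=26 and a 'water' entry exist in
# the CSV files):
#     iron = elements.loc[26]
#     water = compounds.loc['water']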

notation_translation = Table(
    ascii.read(os.path.join(_data_directory, 'siegbahn_to_iupac.csv'),
               format='csv',
Exemple #42
0
from glob import glob
from astropy.io import fits
from astropy.io import ascii
from astropy.visualization import ZScaleInterval
from astropy.time import Time
from astropy.coordinates import SkyCoord

from bokeh.io import curdoc
from bokeh.layouts import column, row, layout
from bokeh.models import Slider, ColumnDataSource, Button, CustomJS
from bokeh.models import Span, Range1d, LinearColorMapper, Whisker
from bokeh.models.glyphs import Text
from bokeh.plotting import figure

# Load in the ZTF data and convert to Pandas DataFrame
data = ascii.read('lc.txt').to_pandas()

# Load in ZTF Images at this object's RA/Dec
ZS = ZScaleInterval(nsamples=10000,
                    contrast=0.15,
                    max_reject=0.5,
                    min_npixels=5,
                    krej=2.5,
                    max_iterations=5)
fits_files = sorted(glob('ZTF_Sci_Files/*.fits'))
num_files = len(fits_files)
mjds_g, mjds_r = [], []  # Store MJD of image
imdat_g, imdat_r = [], []  # Store Image pixel data
vmin_g, vmin_r = [], []  # Store Z-Scale Minimums
vmax_g, vmax_r = [], []  # Store Z-Scale Maximums
hdrs_g, hdrs_r = [], []  # Store Image headers
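
# a hedged sketch of the loop that presumably fills the lists above
# (the 'FILTER' and 'OBSMJD' header keywords are assumptions about these files):
for f in fits_files:
    with fits.open(f) as hdul:
        hdr, imdat = hdul[0].header, hdul[0].data
    vmin, vmax = ZS.get_limits(imdat)  # z-scale display cuts
    if 'g' in hdr['FILTER']:
        mjds_g.append(hdr['OBSMJD'])
        imdat_g.append(imdat)
        vmin_g.append(vmin)
        vmax_g.append(vmax)
        hdrs_g.append(hdr)
    else:
        mjds_r.append(hdr['OBSMJD'])
        imdat_r.append(imdat)
        vmin_r.append(vmin)
        vmax_r.append(vmax)
        hdrs_r.append(hdr)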
Exemple #43
0
    def __init__(self,
                 tau_V,
                 geometry="dusty",
                 dust_type="mw",
                 dust_distribution="clumpy"):
        """
        Load the attenuation curves for a given geometry, dust type and
        dust distribution.

        Parameters
        ----------
        tau_V: float
           optical depth in V band

        geometry: string
           'shell', 'cloudy' or 'dusty'

        dust_type: string
           'mw' or 'smc'

        dust_distribution: string
           'homogeneous' or 'clumpy'

        Notes
        -----
        The model built here, evaluated at wavelengths x, gives Att(x),
        the attenuation curve in magnitudes.

        """
        # Ensure strings are lower cases
        self.geometry = geometry.lower()
        self.dust_type = dust_type.lower()
        self.dust_distribution = dust_distribution.lower()

        data_path = pkg_resources.resource_filename("dust_attenuation",
                                                    "data/WG00/")

        data = ascii.read(data_path + self.geometry + ".txt", header_start=0)

        if self.dust_type == "mw":
            start = 0
        elif self.dust_type == "smc":
            start = 25
        else:
            raise ValueError("dust_type must be 'mw' or 'smc'")

        # Column names
        tau_colname = "tau"
        tau_att_colname = "tau_att"
        fsca_colname = "f(sca)"
        fdir_colname = "f(dir)"
        fesc_colname = "f(esc)"

        if self.dust_distribution == "clumpy":
            tau_att_colname += "_c"
            fsca_colname += "_c"
            fdir_colname += "_c"
            fesc_colname += "_c"

        elif self.dust_distribution == "homogeneous":
            tau_att_colname += "_h"
            fsca_colname += "_h"
            fdir_colname += "_h"
            fesc_colname += "_h"

        tau_att_list = []
        tau_list = []
        fsca_list = []
        fdir_list = []
        fesc_list = []

        len_data = len(data["lambda"])
        # number of lines between 2 models
        steps = 25

        counter = start
        while counter < len_data:
            tau_att_list.append(
                np.array(data[tau_att_colname][counter:counter + steps]))
            tau_list.append(
                np.array(data[tau_colname][counter:counter + steps]))
            fsca_list.append(
                np.array(data[fsca_colname][counter:counter + steps]))
            fdir_list.append(
                np.array(data[fdir_colname][counter:counter + steps]))
            fesc_list.append(
                np.array(data[fesc_colname][counter:counter + steps]))

            counter += int(2 * steps)

        # Convert to np.array and take transpose to have (wvl, tau_V)
        tau_att_table = np.array(tau_att_list).T
        tau_table = np.array(tau_list).T
        fsca_table = np.array(fsca_list).T
        fdir_table = np.array(fdir_list).T
        fesc_table = np.array(fesc_list).T

        # wavelength grid. It is the same for all the models
        wvl = np.array(data["lambda"][0:25])
        self.wvl_grid = wvl

        # Grid for the optical depth
        tau_V_grid = np.array([
            0.25,
            0.5,
            0.75,
            1.0,
            1.5,
            2.0,
            2.5,
            3.0,
            3.5,
            4.0,
            4.5,
            5.0,
            5.5,
            6.0,
            7.0,
            8.0,
            9.0,
            10.0,
            15.0,
            20.0,
            25.0,
            30.0,
            35.0,
            40.0,
            45.0,
            50.0,
        ])

        # Create a 2D tabular model for tau_att and all flux fraction
        tab = tabular_model(2, name="2D_table")

        # Values corresponding to the x and y grid points
        gridpoints = (wvl, tau_V_grid)

        self.model = tab(
            gridpoints,
            lookup_table=tau_att_table,
            name="tau_att_WG00",
            bounds_error=False,
            fill_value=None,
            method="linear",
        )

        self.tau = tab(
            gridpoints,
            lookup_table=tau_table,
            name="tau_WG00",
            bounds_error=False,
            fill_value=None,
            method="linear",
        )

        self.fsca = tab(
            gridpoints,
            lookup_table=fsca_table,
            name="fsca_WG00",
            bounds_error=False,
            fill_value=None,
            method="linear",
        )

        self.fdir = tab(
            gridpoints,
            lookup_table=fdir_table,
            name="fdir_WG00",
            bounds_error=False,
            fill_value=None,
            method="linear",
        )

        self.fesc = tab(
            gridpoints,
            lookup_table=fesc_table,
            name="fesc_WG00",
            bounds_error=False,
            fill_value=None,
            method="linear",
        )

        # In Python 2: super(WG00, self)
        # In Python 3: super() but super(WG00, self) still works
        super(WG00, self).__init__(tau_V=tau_V)
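
# A minimal usage sketch, assuming the __init__ above belongs to the WG00
# model of the dust_attenuation package (the class name, import path and
# calling convention are assumptions, not shown in this snippet):
#     from dust_attenuation.radiative_transfer import WG00
#     att_model = WG00(tau_V=1.0, geometry='shell', dust_type='mw',
#                      dust_distribution='clumpy')
#     att_mag = att_model(np.linspace(0.15, 3.0, 50) * u.micron)  # Att(x) [mag]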
Exemple #44
0
                        dest='filename',
                        type=str,
                        required=False,
                        help='Input CSV filename',
                        default=None)
    parser.add_argument('--out',
                        dest='out',
                        type=str,
                        required=False,
                        help='Output filename',
                        default='lightcurves.csv')

    args = parser.parse_args()

    if args.names is None and args.filename is not None:
        a = ascii.read(args.filename, format='csv')
        args.names = list(a['name'])
    elif args.names is None and args.filename is None:
        print("No input candidates. Please use --n and provide ZTF names\
or --file to use a CSV file")
        #exit()

    # Read the secrets
    secrets = ascii.read('../kowalski/secrets.csv', format='csv')
    username_kowalski = secrets['kowalski_user'][0]
    password_kowalski = secrets['kowalski_pwd'][0]

    # Get the light curves
    light_curves_alerts = get_lightcurve_alerts(username_kowalski,
                                                password_kowalski, args.names)
    # Add prv_candidates photometry to the light curve
Exemple #45
0
def plot_light_curve_fit_from_SNANA(snid='colfax', plotmags=False):
    """ make a plot showing the light curve fit, extracted from a SNANA
    output .TEXT file generated by D.Scolnic"""
    from pytools import plotsetup, colorpalette as cp
    from astropy.io import ascii
    import numpy as np
    from matplotlib import pyplot as pl
    from scipy.interpolate import interp1d
    from matplotlib import ticker
    import os
    import sys
    from hstphot import hstzpt

    alpha2filter = {
        'H': 'f160w',
        'N': 'f140w',
        'J': 'f125w',
        'Y': 'f105w',
        'Z': 'f850l',
        'I': 'f814w',
        'V': 'f606w',
        'L': 'f098m',
        'O': 'f127m',
        'P': 'f139m',
        'Q': 'f153m',
    }
    colordict = {
        'H': 'k',
        'J': cp.darkgold,
        'Y': cp.cadetblue,
        'N': cp.darkgreen,
        'V': cp.coral
    }

    fluxdat = ascii.read('CANDELs-%s.LCPLOT.TEXT' % snid)
    name = fluxdat['col1']
    mjd = fluxdat['col2']
    tobs = fluxdat['col3']
    fluxcal = fluxdat['col4']
    fluxcalerr = fluxdat['col5']
    obsflag = fluxdat['col6']
    bandletter = fluxdat['col7']

    flux = fluxcal * 0.1
    fluxerr = fluxcalerr * 0.1
    # mag = np.where( flux>0, -2.5*np.log10( flux ) + 25, 35 )
    # magerr = np.where( flux>0, 1.0857 * fluxerr / flux, 0.2 )

    # medbanddict = {'f105w':'f098m','f125w':'f127m', 'f140w':'f139m', 'f160w':'f153m'}
    colors = [cp.purple, cp.bluegray, cp.darkgreen, cp.red]

    plotsetup.fullpaperfig([8, 3])
    pl.clf()
    fig = pl.gcf()

    mjdmin = mjd.min() - 10
    mjdmax = mjd.max() + 25
    mjdmod = np.arange(mjdmin, mjdmax, 1)

    if snid == 'colfax':
        ymax = 0.7
        ymaxMB = 0.3
        mjdmin = 56025
        mjdmax = 56220
        z, mjdpeak = 2.1, 56080.
        snlabel = 'GND12Col'
    elif snid == 'stone':
        ymax = 1.2
        ymaxMB = 0.35
        mjdmin = 56430
        mjdmax = 56600
        z, mjdpeak = 1.8, 56485.
        snlabel = 'GND13Sto'

    trestmin = (mjdmin - mjdpeak) / (1 + z)
    trestmax = (mjdmax - mjdpeak) / (1 + z)

    ax1w = pl.subplot2grid([4, 3], [0, 0], rowspan=3)
    ax2w = pl.subplot2grid([4, 3], [0, 1], rowspan=3, sharex=ax1w, sharey=ax1w)
    ax3w = pl.subplot2grid([4, 3], [0, 2], rowspan=3, sharex=ax1w, sharey=ax1w)
    ax1m = pl.subplot2grid([4, 3], [3, 0], sharex=ax1w)
    ax2m = pl.subplot2grid([4, 3], [3, 1], sharex=ax1w, sharey=ax1m)
    ax3m = pl.subplot2grid([4, 3], [3, 2], sharex=ax1w, sharey=ax1m)

    iax = 0
    for bbl, mbl, color in zip(['Y', 'J', 'N', 'H'], ['L', 'O', 'P', 'Q'],
                               colors):
        if bbl not in bandletter: continue
        iax += 1
        filternamew = alpha2filter[bbl].upper()
        filternamem = alpha2filter[mbl].upper()

        if iax == 1:
            axw = ax1w
            axm = ax1m
            axw.text(0.05,
                     0.92,
                     snlabel,
                     color='k',
                     fontsize='large',
                     fontweight='heavy',
                     ha='left',
                     va='top',
                     transform=axw.transAxes)
            # axw.text( -0.28, 1.2, snlabel, backgroundcolor='w', color='k', fontsize='large', fontweight='heavy', ha='left',va='top', transform=axw.transAxes, zorder=1000 )
            axw.set_ylabel('flux (zp$_{AB}$=25)')
            axm.set_ylabel(r'$\Delta$f$_{25}$')

        elif iax == 2:
            axw = ax2w
            axm = ax2m
        else:
            axw = ax3w
            axm = ax3m
            axw.yaxis.set_ticks_position('right')
            axw.yaxis.set_ticks_position('both')
            axm.yaxis.set_ticks_position('right')
            axm.yaxis.set_ticks_position('both')

        axtop = axw.twiny()
        axtop.set_xlim(trestmin, trestmax)
        if iax == 2:
            axtop.set_xlabel('rest frame time (days from peak)')
            axm.set_xlabel('observer frame time (MJD)')
            pl.setp(axw.get_yticklabels(), visible=False)
            pl.setp(axm.get_yticklabels(), visible=False)

        pl.setp(axw.get_xticklabels(), visible=False)

        axw.text(0.95,
                 0.92,
                 filternamew,
                 ha='right',
                 va='top',
                 color=color,
                 transform=axw.transAxes)

        iobs = np.where((bandletter == bbl) & (obsflag > 0))[0]
        imod = np.where((bandletter == bbl) & (obsflag == 0))[0]

        # import pdb; pdb.set_trace()
        axw.errorbar(mjd[iobs],
                     flux[iobs],
                     fluxerr[iobs],
                     color=color,
                     marker='D',
                     ls=' ',
                     zorder=-100)
        axw.plot(mjd[imod],
                 flux[imod],
                 color=color,
                 marker=' ',
                 ls='-',
                 zorder=-100)
        axw.fill_between(mjd[imod],
                         flux[imod] + fluxerr[imod],
                         flux[imod] - fluxerr[imod],
                         color=color,
                         alpha=0.3,
                         zorder=-1000)

        if mbl not in bandletter: continue
        axm.text(0.95,
                 0.5,
                 filternamem,
                 ha='right',
                 va='center',
                 backgroundcolor='w',
                 color=color,
                 transform=axm.transAxes)

        iobsmb = np.where((bandletter == mbl) & (obsflag > 0))[0]
        imodmb = np.where((bandletter == mbl) & (obsflag == 0))[0]

        mbfluxinterp = interp1d(mjd[imodmb],
                                flux[imodmb],
                                fill_value=0,
                                bounds_error=False)
        mbfluxdiff = flux[iobsmb] - mbfluxinterp(mjd[iobsmb])

        axm.errorbar(mjd[iobsmb],
                     mbfluxdiff,
                     fluxerr[iobsmb],
                     color=color,
                     marker='o',
                     mfc='w',
                     ls=' ')
        axm.fill_between(mjd[imodmb],
                         fluxerr[imodmb],
                         -fluxerr[imodmb],
                         color=color,
                         alpha=0.3,
                         zorder=-1000)
        axm.axhline(0, color=color, lw=0.8, ls='--')

    ax1w.set_xlim(mjdmin, mjdmax)
    ax1w.set_ylim(-0.08, ymax)
    ax1m.set_ylim(-ymaxMB, ymaxMB)

    ax1w.xaxis.set_major_locator(ticker.MultipleLocator(100))
    ax1w.xaxis.set_minor_locator(ticker.MultipleLocator(25))
    ax1w.yaxis.set_major_locator(ticker.MultipleLocator(0.2))
    ax1w.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))
    ax1m.xaxis.set_major_locator(ticker.MultipleLocator(100))
    ax1m.xaxis.set_minor_locator(ticker.MultipleLocator(25))
    ax1m.yaxis.set_major_locator(ticker.MultipleLocator(0.2))
    ax1m.yaxis.set_minor_locator(ticker.MultipleLocator(0.1))

    fig.subplots_adjust(left=0.1,
                        bottom=0.18,
                        right=0.95,
                        top=0.82,
                        wspace=0.,
                        hspace=0)

    pl.draw()
Exemple #46
0
                        default=None)
    parser.add_argument('--out',
                        dest='out',
                        type=str,
                        required=False,
                        help='Output filename',
                        default='lightcurves.csv')

    args = parser.parse_args()

    if args.names is None:
        print("No input candidates. Please use --n and provide ZTF names")
        exit()

    # Read the secrets
    secrets = ascii.read('../kowalski/secrets.csv', format='csv')
    username_kowalski = secrets['kowalski_user'][0]
    password_kowalski = secrets['kowalski_pwd'][0]

    # Get the light curves
    light_curves_alerts = get_lightcurve_alerts(username_kowalski,
                                                password_kowalski, args.names)

    # Add prv_candidates photometry to the light curve
    light_curves_aux = get_lightcurve_alerts_aux(username_kowalski,
                                                 password_kowalski, args.names)

    light_curves = light_curves_alerts + light_curves_aux

    # Create a table and output CSV file
    create_tbl_lc(light_curves, args.out)
Exemple #47
0
filename = filepath[slash + 1:]

#Tlist  = {'-5C': [], '5C': [], '15C': [], '23C': []}
# Tlist  = {'-10C': [[], [], []], '-5C': [[], [], []], '0C': [[], [], []], '5C': [[], [], []], '10C': [[], [], []], '23C': [[], [], []]}
#Tlist  = {'23C': [[], [], []]}
Tlist = {'5C': [[], [], []]}
CPlist = [100, 200, 300, 400, 500, 600]
Nlist = [300, 400, 500, 600]

outfile = open('/disk/lif2/spike/detectorData/' + detector + '/leakage.out',
               'w')

for T in Tlist:
    outfile.write('T = ' + str(T) + '\n')
    # First construct the maps of ADC_0V
    CPdata = asciio.read(filepath + '/' + filename + '_' + T + '.C0V.txt')
    Ndata = asciio.read(filepath + '/' + filename + '_' + T + '.N0V.txt')
    ADC_0V_CP = np.zeros((32, 32))
    ADC_0V_N = np.zeros((32, 32))

    for i in range(1024):
        CPcol = CPdata.field('col4')[START + i]
        CProw = CPdata.field('col5')[START + i]
        Ncol = Ndata.field('col4')[START + i]
        Nrow = Ndata.field('col5')[START + i]
        ADC_0V_CP[CProw, CPcol] = CPdata.field('col6')[START + i]
        ADC_0V_N[Nrow, Ncol] = Ndata.field('col6')[START + i]

    for HV in CPlist:
        outfile.write('HV = ' + str(HV) + 'V\n')
        outfile.write('CP mode' + '\n')
Exemple #48
0
p0 = 1.013250e5 * u.Pa
rho0 = 1.2250 * u.kg / u.m**3
T0 = 288.15 * u.K
g0 = 9.80665 * u.m / u.s**2
S = 110.4 * u.K
Ti = 273.15 * u.K
beta = 1.458e-6 * u.kg / u.s / u.m / u.K**(0.5)
_gamma = 1.4
sigma = 3.65e-10 * u.m
N = 6.02257e26 * (u.kg * u.mol)**-1
R = 8314.32 * u.J / u.kg / u.K
R_air = 287.053 * u.J / u.kg / u.K

# Reading layer parameters file
coesa_file = get_pkg_data_filename("data/coesa62.dat")
coesa62_data = ascii.read(coesa_file)
b_levels = coesa62_data["b"].data
zb_levels = coesa62_data["Zb [km]"].data * u.km
hb_levels = coesa62_data["Hb [km]"].data * u.km
Tb_levels = coesa62_data["Tb [K]"].data * u.K
Lb_levels = coesa62_data["Lb [K/km]"].data * u.K / u.km
pb_levels = coesa62_data["pb [mbar]"].data * u.mbar


class COESA62(COESA):
    """ Holds the model for U.S Standard Atmosphere 1962. """
    def __init__(self):
        """ Constructor for the class. """
        super().__init__(b_levels, zb_levels, hb_levels, Tb_levels, Lb_levels,
                         pb_levels)
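
# a hedged usage sketch (assumes this is the poliastro COESA62 model, whose
# properties() method returns temperature, pressure and density at a given
# altitude):
atm = COESA62()
T, p, rho = atm.properties(10 * u.km)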
Exemple #49
0
    if plotdir is None:
        plotdir = os.path.dirname(filepath)
    if plotname is None:
        plotname = input('What is the plot name: ')
        print(plotname)

    im.save(plotdir + '/' + plotname + '.pdf',
            format='pdf',
            transparent=True,
            dpi=300)


# read in DIGIT source list
# filename = '/Users/yaolun/data/digit_source'
filename = '/Users/yaolun/data/hst24_sources'
digit = ascii.read(filename)
digit_coord = []
for i in range(len(digit)):
    c = SkyCoord(digit['RA'][i] + ' ' + digit['DEC'][i],
                 unit=(u.hourangle, u.deg))
    digit_coord.append(c)

objdir = '/Users/yaolun/data/digit_hst/2MASS/'

# flux calibration data
fnu_mag = {'J': [1594., 27.8], 'H': [1024., 20.]}
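# the pairs above are the 2MASS zero-point flux densities (Jy) and their
# uncertainties (Cohen et al. 2003), so a magnitude converts to flux via,
# e.g., fnu_J = fnu_mag['J'][0] * 10**(-0.4 * Jmag)  -- a hedged sketch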

# 2MASS search parameters
band = 'J'
size = '0'
Exemple #50
0
def watson_2014_wp(mstar_thresh=10**9.49, sample='all'):
    """
    projected two point correlation function measurements from Hearin et al. 2014
    
    http://arxiv.org/abs/1403.1578
    
    Parameters
    ----------
    mstar_thresh : float
         minimum stellar mass of the threshold sample in :math:`h^{-2}M_{\odot}`
         e.g. 9.49, 9.89, 10.29 (converted to h=1).
    
    sample : string
        string indicating sample used in the wp calculation:
        e.g. 'all', 'red', 'blue'.
    
    Returns
    -------
    measurement : numpy.ndarray
        array of shape (2,15), where the first row is rp in :math:`h^{-1}` Mpc, and
        the second row is wp in :math:`h^{-1}` Mpc.
    
    err : numpy.array
        error on the wp measurement
    """

    littleh = 0.7

    #get files for specified sample
    if sample == 'all':
        filename = 'table_1.dat'
    elif sample == 'red':
        filename = 'table_A2.dat'
    elif sample == 'blue':
        filename = 'table_A1.dat'
    else:
        msg = ("sample not recognized.")
        raise ValueError(msg)

    #what are the mass thresholds in h=1?
    mstar_thresholds = np.array([10.0**9.8, 10.0**10.2, 10.0**10.6
                                 ]) * littleh**2.0
    mstar_thresholds = np.log10(mstar_thresholds)

    #find nearest to the input value
    mstar_thresh = np.log10(mstar_thresh)
    mask = np.isclose(mstar_thresh, mstar_thresholds, atol=0.01)

    if np.any(mask):
        mstar_thresh = mstar_thresholds[mask]
    else:
        msg = ("mass threshold not with 0.01 dex of an available threshold.")
        raise ValueError(msg)

    #get files for specified stellar mass bin
    if mask[0]:
        column = 1
    elif mask[1]:
        column = 3
    elif mask[2]:
        column = 5
    else:
        msg = ("requested mass threshold not available.")
        raise ValueError(msg)

    #read in data
    filepath = os.path.dirname(__file__)
    filepath = os.path.join(filepath, 'wp_measurements/watson_2014_data/')
    data = ascii.read(filepath + filename)

    rp = np.array(data.columns[0])
    wp = np.array(data.columns[column])
    sigma = np.array(data.columns[column + 1])

    measurement = np.vstack((rp, wp))

    #convert to h=1
    measurement[0] = measurement[0, :]  #*littleh
    measurement[1] = measurement[1, :]  #*littleh
    sigma = sigma  #*littleh

    return measurement, sigma
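
# a minimal usage sketch, assuming the packaged data files are installed:
measurement, err = watson_2014_wp(mstar_thresh=10**9.49, sample='red')
rp, wp = measurement  # both in h^-1 Mpc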
Exemple #51
0
def compare_to_by_eye(fitspath: str, dataset: str):
    """
    This function takes the automated validation table and checks it against
    measurements that were determined by eye. These by-eye measurements are
    encoded in the np.where statements below. It outputs a revised
    validation table based on those measurements.

    Usage:
      valid_table.compare_to_by_eye(fitspath, dataset)

    :param fitspath: Full file path where the input file is and where the
                     output file will be placed.
    :param dataset: Determine which eye measurements to use

    Outputs:
      fitspath + 'bin_validation_revised.tbl' and '.csv'
        Validation table containing bin IDs; number of galaxies in each bin;
        and column indicating OIII4363 detection/non-detection,
        OIII4363_Flux_Observed, OIII4363_S/N, Notes
    """

    ver_table = fitspath + filename_dict['bin_valid']
    ver_tab = asc.read(ver_table)
    indicate = ver_tab['Detection']
    ID = ver_tab['bin_ID']

    # Detections By Eye
    if dataset == 'Voronoi20':
        det_4363 = \
            np.where(
                (ID == 0) | (ID == 2) | (ID == 3) | (ID == 5) | (ID == 6))[0]
    if dataset == 'Voronoi14':
        det_4363 = \
            np.where(
                (ID == 0) | (ID == 7) | (ID == 10) | (ID == 11) | (ID == 12))[
                0]
    if dataset == 'Voronoi10':
        det_4363 = np.where((ID == 1) | (ID == 9) | (ID == 18) | (ID == 21))[0]
    if dataset == 'Grid':
        det_4363 = np.where(
            (ID == 11) | (ID == 13) | (ID == 19) | (ID == 20) | (ID == 21))[0]
    if dataset == 'R23_Grid':
        det_4363 = np.where((ID == 0) | (ID == 4) | (ID == 5) | (ID == 6))[0]
    if dataset == 'O32_Grid':
        det_4363 = np.where((ID == 6))[0]
    if dataset == 'Double_Bin':
        det_4363 = \
            np.where(
                (ID == 0) | (ID == 1) | (ID == 2) | (ID == 7) | (ID == 9) |
                (ID == 10) | (ID == 11) | (ID == 13))[0]
    # make_validation_table will now produce correct detection values
    if dataset == 'n_Bins':
        det_4363 = np.where(
            (ID == 10) | (ID == 14) | (ID == 15) | (ID == 20) |
            (ID == 23) | (ID == 26))[0]
        rlimit = \
            np.where((ID == 8) | (ID == 11) | (ID == 13) | (ID == 16)
                     | (ID == 17) | (ID == 19) | (ID == 22))[0]

    # Caroline: Add your conditions here

    check_ID = np.zeros(len(ID))
  
    check_ID[det_4363] = 1
    if dataset == 'n_Bins':
        check_ID[rlimit] = 0.5

    for ii in range(len(ID)):
        if check_ID[ii] == indicate[ii]:
            print(ID[ii], 'matches with by eye validation')
        else:
            print('*****', ID[ii],
                  'does not match calculated values. Please check!')

    # This is where I need to add the column for notes
    if dataset == 'n_Bins':
        notes = ['N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',
                 'Broad features, but reliable OIII5007 and HGAMMA',
                 'Bad fit, but good OIII5007', 'N/A',
                 'Changed to robust limit 3/22/21', 'N/A', 'N/A', 'N/A', 'N/A',
                 'High Temperature',
                 'not fit well, but reliable OIII5007 and HGAMMA',
                 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A', 'N/A',
                 'N/A']
        note_add = Column(name='Notes', data=notes)
        ver_tab.add_column(note_add, 5)

    # Caroline: Add your notes column here and copy the note_add and ver_tab.add_column lines to your if statement

    ver_tab.remove_column('Detection')
    
    detect_add = Column(name='Detection', data=check_ID)
    ver_tab.add_column(detect_add, 2)

    asc.write(ver_tab, fitspath + filename_dict['bin_valid_rev'],
              format='fixed_width_two_line')
    asc.write(ver_tab, fitspath + 'bin_validation_revised.csv', format='csv')
Exemple #52
0
    def __init__(self,
                 mini=9.0,
                 mini_keyword='Mini',
                 tracks='BPASS',
                 Z='z014',
                 bpass_M=1.0,
                 bpass_q=0.5,
                 bpass_logP=2,
                 bpass_cols='default',
                 **kwargs):
        self.mini = mini
        self.kwargs = kwargs
        self.Z = Z
        assert tracks == 'BPASS', "This version of evo_tracks is only for demo purposes"
        if bpass_M % 1 == 0:
            m_str = int(bpass_M)
        else:
            m_str = bpass_M
        q_str = bpass_q
        if not isinstance(bpass_logP, str):
            if bpass_logP % 1 == 0:
                P_str = int(bpass_logP)
            else:
                P_str = bpass_logP
        else:
            P_str = bpass_logP

        #We're only working with solar metallicity for now
        if not ((self.Z == 'z014') | (self.Z == 'z002')):
            raise NotImplementedError("Unsupported metallicity")

        #Check to see if user wants more columns than the default
        names = default_bpass_col_names
        if bpass_cols != 'default':
            if type(bpass_cols) != list:
                raise TypeError(
                    "This argument should be a list of additional columns to include"
                )
            else:
                #For each name in the list, check if its already included, then include it
                for new_name in bpass_cols:
                    if new_name in names:
                        pass
                    else:
                        names.append(new_name)

        #Get the indices of the columns for reading in
        index = [bpass_col_dict[col] for col in names]
        if bpass_logP == 'inf':
            file_name = bpass_dir + 'sneplot-{0}-{1}'.format(Z, str(m_str))
        else:
            file_name = bpass_dir + '/sneplot-{0}-{1}-{2}-{3}'.format(
                Z, str(m_str), str(q_str), str(P_str))

        #Check if the track exists in the directory...
        if os.path.isfile(file_name):

            #read in only given columns
            track = ascii.read(
                file_name, include_names=['col{}'.format(i) for i in index])

            #rename columns for easy access later
            for i, name in zip(index, names):
                track['col{}'.format(i)].name = name

            #Create columns with info to remember the initial mass, mass ratio, period
            M1_i = Column([bpass_M for i in range(len(track))], name='M1_i')
            q_i = Column([bpass_q for i in range(len(track))], name='q_i')
            logP_i = Column([bpass_logP for i in range(len(track))],
                            name='logP_i')

            track.add_columns([M1_i, q_i, logP_i])

            #Assign object variable with this track
            self.track = track

        else:
            print(file_name)
            raise FileNotFoundError(
                'That combo of mass, q and logP does not exist')
Exemple #53
0
import time

import numpy as np
import pandas as pd
import gc; gc.enable()
import featuretools as ft
from astropy.io import ascii

import featuretools.primitives
from featuretools.primitives import AddNumeric

# Initialize Timer
t0 = time.time()

# Load Dataset
print ("-" * 100)
print ("Load Training File")
data = ascii.read('data/raw/plti/kplr_dr25_inj1_plti.txt').to_pandas()

print(data.head())

# Separate Features and Target Values
print ("-" * 100)
print ("Set X Features and y Target")
feat = ['KIC_ID', 'Sky_Group', 'i_period', 'i_epoch', 'N_Transit', 'i_depth', 'i_dur', 'i_b', 'i_ror', 'i_dor', 'Expected_MES']
data = data[feat]

# Build Entity Set and Append Data Entity
es = ft.EntitySet(id = 'kplr')
es.entity_from_dataframe(entity_id = 'data', dataframe = data, index='KIC_ID')

# Generate Feature Matrix
prims = ['add_numeric', 'subtract_numeric', 'multiply_numeric', 'divide_numeric', 'modulo_by_feature', 'divide_by_feature']
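
# a hedged sketch of the usual next step with this (pre-1.0) featuretools API:
# run deep feature synthesis over the entity set built above
feature_matrix, feature_defs = ft.dfs(entityset=es, target_entity='data',
                                      trans_primitives=prims, max_depth=1)
print(feature_matrix.head())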
Exemple #54
0
def main():
    
    #data sets
    sva1_gold = fits.open('/Users/Christina/DES/data/sva1_gold_detmodel_gals.fits')[1].data

    #COSMOS2015 photo-z
    #z_cosmos = 9.99 --> X-ray object, z_cosmos = 0 --> star
    z_cosmos = ascii.read('sva1_gold_cosmos_photoz.tab')['redshift']
    z0mask = (z_cosmos > 0) & (z_cosmos < 9.9)
    z3mask = (z_cosmos >= 3.) & (z_cosmos < 9.9)
    z4mask = (z_cosmos >= 4.) & (z_cosmos < 9.9)

    #fluxes and errors from sva1_gold
    flux_cosmos = ascii.read('sva1_gold_cosmos_detmodel_fluxes.tab')
       
#    lo_z_flux_cosmos = {'g' : flux_cosmos['g'][z0mask & ~z4mask],
#                        'r' : flux_cosmos['r'][z0mask & ~z4mask],
#                        'i' : flux_cosmos['i'][z0mask & ~z4mask],
#                        'z' : flux_cosmos['z'][z0mask & ~z4mask],
#                        'Y' : flux_cosmos['Y'][z0mask & ~z4mask],
#                        'gerr' : flux_cosmos['gerr'][z0mask & ~z4mask],
#                        'rerr' : flux_cosmos['rerr'][z0mask & ~z4mask],
#                        'ierr' : flux_cosmos['ierr'][z0mask & ~z4mask],
#                        'zerr' : flux_cosmos['zerr'][z0mask & ~z4mask],
#                        'Yerr' : flux_cosmos['Yerr'][z0mask & ~z4mask]}

#    hi_z_flux_cosmos = {'g' : flux_cosmos['g'][z4mask],
#                        'r' : flux_cosmos['r'][z4mask],
#                        'i' : flux_cosmos['i'][z4mask],
#                        'z' : flux_cosmos['z'][z4mask],
#                        'Y' : flux_cosmos['Y'][z4mask],
#                        'gerr' : flux_cosmos['gerr'][z4mask],
#                        'rerr' : flux_cosmos['rerr'][z4mask],
#                        'ierr' : flux_cosmos['ierr'][z4mask],
#                        'zerr' : flux_cosmos['zerr'][z4mask],
#                        'Yerr' : flux_cosmos['Yerr'][z4mask]}

    mag_cosmos = ascii.read('sva1_gold_cosmos_detmodel_mags.tab')
                
    lo_z_mag_cosmos = {'g' : mag_cosmos['g'][z0mask & ~z4mask],
                       'r' : mag_cosmos['r'][z0mask & ~z4mask],
                       'i' : mag_cosmos['i'][z0mask & ~z4mask],
                       'z' : mag_cosmos['z'][z0mask & ~z4mask],
                       'Y' : mag_cosmos['Y'][z0mask & ~z4mask],
                       'gerr' : mag_cosmos['gerr'][z0mask & ~z4mask],
                       'rerr' : mag_cosmos['rerr'][z0mask & ~z4mask],
                       'ierr' : mag_cosmos['ierr'][z0mask & ~z4mask],
                       'zerr' : mag_cosmos['zerr'][z0mask & ~z4mask],
                       'Yerr' : mag_cosmos['Yerr'][z0mask & ~z4mask]}

    hi_z_mag_cosmos = {'g' : mag_cosmos['g'][z4mask],
                       'r' : mag_cosmos['r'][z4mask],
                       'i' : mag_cosmos['i'][z4mask],
                       'z' : mag_cosmos['z'][z4mask],
                       'Y' : mag_cosmos['Y'][z4mask],
                       'gerr' : mag_cosmos['gerr'][z4mask],
                       'rerr' : mag_cosmos['rerr'][z4mask],
                       'ierr' : mag_cosmos['ierr'][z4mask],
                       'zerr' : mag_cosmos['zerr'][z4mask],
                       'Yerr' : mag_cosmos['Yerr'][z4mask]}

    good = np.loadtxt('sva1_gold_goodregions_indices.txt.gz', dtype=int)

#    flux_sva1_gold = {'g' : sva1_gold['flux_detmodel_g'][good],
#                      'r' : sva1_gold['flux_detmodel_r'][good],
#                      'i' : sva1_gold['flux_detmodel_i'][good],
#                      'z' : sva1_gold['flux_detmodel_z'][good],
#                      'Y' : sva1_gold['flux_detmodel_Y'][good],
#                      'gerr' : sva1_gold['fluxerr_detmodel_g'][good],
#                      'rerr' : sva1_gold['fluxerr_detmodel_r'][good],
#                      'ierr' : sva1_gold['fluxerr_detmodel_i'][good],
#                      'zerr' : sva1_gold['fluxerr_detmodel_z'][good],
#                      'Yerr' : sva1_gold['fluxerr_detmodel_Y'][good]}
    
    mag_sva1_gold = {'g' : sva1_gold['mag_detmodel_g'][good],
                     'r' : sva1_gold['mag_detmodel_r'][good],
                     'i' : sva1_gold['mag_detmodel_i'][good],
                     'z' : sva1_gold['mag_detmodel_z'][good],
                     'Y' : sva1_gold['mag_detmodel_Y'][good],
                     'gerr' : sva1_gold['magerr_detmodel_g'][good],
                     'rerr' : sva1_gold['magerr_detmodel_r'][good],
                     'ierr' : sva1_gold['magerr_detmodel_i'][good],
                     'zerr' : sva1_gold['magerr_detmodel_z'][good],
                     'Yerr' : sva1_gold['magerr_detmodel_Y'][good]}

    N_try_lo = 1000
    N_try_lo_cosmos = len(lo_z_mag_cosmos['g'])
    N_try_hi = 1000
    N_try_hi_cosmos = len(hi_z_mag_cosmos['g'])

    lomags = np.array(list(zip(mag_sva1_gold['g'][:N_try_lo],
                               mag_sva1_gold['r'][:N_try_lo],
                               mag_sva1_gold['i'][:N_try_lo])))
    lomagerrs = np.array(list(zip(mag_sva1_gold['gerr'][:N_try_lo],
                                  mag_sva1_gold['rerr'][:N_try_lo],
                                  mag_sva1_gold['ierr'][:N_try_lo])))
    cosmoslomags = np.array(list(zip(lo_z_mag_cosmos['g'][:N_try_lo_cosmos],
                                     lo_z_mag_cosmos['r'][:N_try_lo_cosmos],
                                     lo_z_mag_cosmos['i'][:N_try_lo_cosmos])))

    himags = np.array(list(zip(mag_sva1_gold['g'][:N_try_hi],
                               mag_sva1_gold['r'][:N_try_hi],
                               mag_sva1_gold['i'][:N_try_hi])))
    himagerrs = np.array(list(zip(mag_sva1_gold['gerr'][:N_try_hi],
                                  mag_sva1_gold['rerr'][:N_try_hi],
                                  mag_sva1_gold['ierr'][:N_try_hi])))
    cosmoshimags = np.array(list(zip(hi_z_mag_cosmos['g'][:N_try_hi_cosmos],
                                     hi_z_mag_cosmos['r'][:N_try_hi_cosmos],
                                     hi_z_mag_cosmos['i'][:N_try_hi_cosmos])))


    def ntest(vals, errs, truevals, nfunc, N_try, N_try_cosmos):
        start = time.time()
        out = nfunc(vals, errs, truevals)
        end = time.time()
        print "For {}: {}-d mag vector, {} gold galaxies, {} COSMOS galaxies: {}".format(nfunc.func_name,
                                                                                         len(vals[0]),
                                                                                         N_try,
                                                                                         N_try_cosmos,
                                                                                         end-start)

        return out

    def plotn(n_lo, n_hi, save_file):
        plt.scatter( mag_sva1_gold['r'][:N_try_hi] - mag_sva1_gold['i'][:N_try_hi],
                     mag_sva1_gold['g'][:N_try_hi] - mag_sva1_gold['r'][:N_try_hi],
                     c=(n_hi/N_try_hi_cosmos)/( (n_hi/N_try_hi_cosmos)+(n_lo/N_try_lo_cosmos) ),
                     edgecolor='none')
        plt.xlabel('mag_detmodel_r - mag_detmodel_i')
        plt.ylabel('mag_detmodel_g - mag_detmodel_r')
        plt.colorbar(label='hi/(hi+lo)')
        plt.xlim(-1,4)
        plt.ylim(-1,4)
        plt.savefig(save_file)
        plt.close()

    n_lo_z = ntest(lomags, lomagerrs, cosmoslomags, n, N_try_lo, N_try_lo_cosmos)
    n1_lo_z = ntest(lomags, lomagerrs, cosmoslomags, n1, N_try_lo, N_try_lo_cosmos)
    n2_lo_z = ntest(lomags, lomagerrs, cosmoslomags, n2, N_try_lo, N_try_lo_cosmos)
    n3_lo_z = ntest(lomags, lomagerrs, cosmoslomags, n3, N_try_lo, N_try_lo_cosmos)
    ntree_lo_z = ntest(lomags, lomagerrs, cosmoslomags, ntree, N_try_lo, N_try_lo_cosmos)

    n_hi_z = ntest(himags, himagerrs, cosmoshimags, n, N_try_hi, N_try_hi_cosmos)
    n1_hi_z = ntest(himags, himagerrs, cosmoshimags, n1, N_try_hi, N_try_hi_cosmos)
    n2_hi_z = ntest(himags, himagerrs, cosmoshimags, n2, N_try_hi, N_try_hi_cosmos)
    n3_hi_z = ntest(himags, himagerrs, cosmoshimags, n3, N_try_hi, N_try_hi_cosmos)
    ntree_hi_z = ntest(himags, himagerrs, cosmoshimags, ntree, N_try_hi, N_try_hi_cosmos)


    plotn(n_lo_z, n_hi_z, 'hi_z_cosmos_mag_densities_ri_gr_n')
    plotn(n1_lo_z, n1_hi_z, 'hi_z_cosmos_mag_densities_ri_gr_n1')
    plotn(n2_lo_z, n2_hi_z, 'hi_z_cosmos_mag_densities_ri_gr_n2')
    plotn(n3_lo_z, n3_hi_z, 'hi_z_cosmos_mag_densities_ri_gr_n3')
    plotn(ntree_lo_z, ntree_hi_z, 'hi_z_cosmos_mag_densities_ri_gr_ntree')

    if (np.all(n1_lo_z==n2_lo_z) and np.all(n2_lo_z==n3_lo_z) and np.all(n1_hi_z==n_hi_z)):
        check = 'True'
    else:
        check = 'False'

    print "Outputs are the same for n,n1,n2,n3?:" + check
Exemple #55
0
def read_table(filename):
    """ Read ascii table """
    t = ascii.read(filename)
    return t
Exemple #56
0

def string_to_dec(string):
    '''convert coordinates from Kreckel et al. (2017) to astropy
    
    the declination in the paper is given as "01:36:42.212" 
    but astropy requires "01d36m42.212s".
    This function replaces the ":" with the appropriate character.
    '''
    return string.replace(':', 'd', 1).replace(':', 'm') + 's'
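
# string_to_ra is used further below but its definition is not shown in this
# snippet; a hypothetical counterpart by analogy with string_to_dec
# (hours and minutes instead of degrees and arcminutes):
def string_to_ra(string):
    '''convert "01:36:42.212" to the astropy-friendly "01h36m42.212s"'''
    return string.replace(':', 'h', 1).replace(':', 'm') + 's'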


# from Kreckel+2017

pn_NGC628_kreckel = ascii.read(basedir / 'data' / 'external' /
                               'kreckel_pn_candidates.txt',
                               format='csv',
                               delimiter=';')
snr_NGC628_kreckel = ascii.read(basedir / 'data' / 'external' /
                                'kreckel_snr_candidates.txt',
                                format='csv',
                                delimiter=';')

# convert string to astronomical coordinates
pn_NGC628_kreckel['RA'] = list(map(string_to_ra, pn_NGC628_kreckel['RA']))
pn_NGC628_kreckel['DEC'] = list(map(string_to_dec, pn_NGC628_kreckel['DEC']))
pn_NGC628_kreckel.meta['reference'] = 'Kreckel+2017'
pn_NGC628_kreckel.meta['bibcode'] = '2017ApJ...834..174K'

pn_NGC628_kreckel['OIII'] = 10**((pn_NGC628_kreckel['mOIII'] + 13.74) / -2.5)
pn_NGC628_kreckel['Ha/NII'][pn_NGC628_kreckel['Ha/NII'] == 'L'] = '1e-30'
pn_NGC628_kreckel['Ha/SII'][pn_NGC628_kreckel['Ha/SII'] == 'L'] = '1e-30'
Exemple #57
0
    print('Field: %s' % field)
    print('CCD: %s' % CCD)

    if field[:8] == 'Blind15A':
        color_bool = True
    else:
        color_bool = False

    # Load lightcurves from archive for each CCDs
    print('Loading LC for this field/CCD...')

    if file_path:
        lcs_id = []
        lcs_g = []
        data = ascii.read(file_path, format='cds')
        print(data)
    else:
        if CCD == 'all':
            p4j_bool = False
            gatspy_bool = False
            color_bool = False
            lcs_id, lcs_g = read_lc_field(field, band=band)
            mjd_key = 'mjd'
            mag_key = 'aperture_mag_%i' % (n_app)
            emag_key = 'aperture_mag_err_%i' % (n_app)

        else:
            lcs_id, lcs_g = read_lc(field, CCD, occ=occ, band=band)
            mjd_key = 'MJD'
            mag_key = 'MAG_KRON'
Exemple #58
0
    if sec == 1: title = 'High Spin - Low Mass Galaxies'
    elif sec == 3: title = 'High Spin - High Mass Galaxies'
    elif sec == 4: title = 'Low Spin - Low Mass Galaxies'
    elif sec == 6: title = 'Low Spin - High Mass Galaxies'
    elif sec == 123: title = 'High Spin Galaxies'
    elif sec == 456: title = 'Low Spin Galaxies'
    elif sec == 0: title = 'All Galaxies'
    else: title = sec

    axs[0].set_title(title)

    for ax, vtype in zip(axs, ['a', 'r', 's']):

        etaTable = ascii.read('../data/eta/eta_minradV{}_maxradV{}_sec{}_vtype{}.txt'\
                .format(minradV,maxradV,sec,vtype),\
                names=['eta','eta_std','eta_random_mean','eta_random_std','rmin','rmax'])

        y = etaTable['eta'].data
        yerr = etaTable['eta_std'].data

        yran_mean = etaTable['eta_random_mean'].data
        yran_err = etaTable['eta_random_std'].data

        x = (etaTable['rmin'].data + etaTable['rmax'].data) / 2

        # Theoretical n1/n2 value for random spins
        ax.hlines(1 / (np.sqrt(2) - 1), x[0], x[-1], linestyles=':')
        ax.plot(x, yran_mean, c='k', alpha=.7)

        ax.fill_between(x,
Exemple #59
0
def calc_eccentricity(args, options):
    table = os.path.join(args[0], 'table2.dat')
    readme = os.path.join(args[0], 'ReadMe')
    dierickx = ascii.read(table, readme=readme)
    vxvv = np.dstack([
        dierickx['RAdeg'], dierickx['DEdeg'], dierickx['Dist'] / 1e3,
        dierickx['pmRA'], dierickx['pmDE'], dierickx['HRV']
    ])[0]
    ro, vo, zo = 8., 220., 0.025
    ra, dec = vxvv[:, 0], vxvv[:, 1]
    lb = bovy_coords.radec_to_lb(ra, dec, degree=True)
    pmra, pmdec = vxvv[:, 3], vxvv[:, 4]
    pmllpmbb = bovy_coords.pmrapmdec_to_pmllpmbb(pmra,
                                                 pmdec,
                                                 ra,
                                                 dec,
                                                 degree=True)
    d, vlos = vxvv[:, 2], vxvv[:, 5]
    rectgal = bovy_coords.sphergal_to_rectgal(lb[:, 0],
                                              lb[:, 1],
                                              d,
                                              vlos,
                                              pmllpmbb[:, 0],
                                              pmllpmbb[:, 1],
                                              degree=True)
    vsolar = np.array([-10.1, 4.0, 6.7])
    vsun = np.array([
        0.,
        1.,
        0.,
    ]) + vsolar / vo
    X = rectgal[:, 0] / ro
    Y = rectgal[:, 1] / ro
    Z = rectgal[:, 2] / ro
    vx = rectgal[:, 3] / vo
    vy = rectgal[:, 4] / vo
    vz = rectgal[:, 5] / vo
    Rphiz = bovy_coords.XYZ_to_galcencyl(X, Y, Z, Zsun=zo / ro)
    vRvTvz = bovy_coords.vxvyvz_to_galcencyl(vx,
                                             vy,
                                             vz,
                                             Rphiz[:, 0],
                                             Rphiz[:, 1],
                                             Rphiz[:, 2],
                                             vsun=vsun,
                                             Xsun=1.,
                                             Zsun=zo / ro,
                                             galcen=True)
    #do the integration and individual analytic estimate for each object
    ts = np.linspace(0., 20., 10000)
    lp = LogarithmicHaloPotential(normalize=1.)
    e_ana = np.zeros(len(vxvv))
    e_int = np.zeros(len(vxvv))
    print(
        'Performing orbit integration and analytic parameter estimates for Dierickx et al. sample...'
    )
    for i in tqdm(range(len(vxvv))):
        try:
            orbit = Orbit(vxvv[i], radec=True, vo=220., ro=8.)
            e_ana[i] = orbit.e(analytic=True, pot=lp, c=True)
        except UnboundError:
            e_ana[i] = np.nan
        orbit.integrate(ts, lp)
        e_int[i] = orbit.e(analytic=False)
    fig = plt.figure()
    fig.set_size_inches(1.5 * columnwidth, 1.5 * columnwidth)
    plt.scatter(e_int, e_ana, s=1, color='Black', lw=0.)
    plt.xlabel(r'$\mathrm{galpy\ integrated}\ e$')
    plt.ylabel(r'$\mathrm{galpy\ analytic}\ e$')
    plt.xlim(0., 1.)
    plt.ylim(0., 1.)
    fig.tight_layout()
    plt.savefig(os.path.join(args[0], 'dierickx-integratedeanalytice.png'),
                format='png',
                dpi=200)
    fig = plt.figure()
    fig.set_size_inches(1.5 * columnwidth, 1.5 * columnwidth)
    plt.hist(e_int, bins=30)
    plt.xlim(0., 1.)
    plt.xlabel(r'$\mathrm{galpy}\ e$')
    fig.tight_layout()
    plt.savefig(os.path.join(args[0], 'dierickx-integratedehist.png'),
                format='png',
                dpi=200)
    fig = plt.figure()
    fig.set_size_inches(1.5 * columnwidth, 1.5 * columnwidth)
    plt.scatter(dierickx['e'], e_int, s=1, color='Black', lw=0.)
    plt.xlabel(r'$\mathrm{Dierickx\ et\ al.}\ e$')
    plt.ylabel(r'$\mathrm{galpy\ integrated}\ e$')
    plt.xlim(0., 1.)
    plt.ylim(0., 1.)
    fig.tight_layout()
    plt.savefig(os.path.join(args[0], 'dierickx-integratedee.png'),
                format='png',
                dpi=200)
    fig = plt.figure()
    fig.set_size_inches(1.5 * columnwidth, 1.5 * columnwidth)
    plt.scatter(dierickx['e'], e_ana, s=1, color='Black', lw=0.)
    plt.xlabel(r'$\mathrm{Dierickx\ et\ al.}\ e$')
    plt.ylabel(r'$\mathrm{galpy\ estimated}\ e$')
    plt.xlim(0., 1.)
    plt.ylim(0., 1.)
    fig.tight_layout()
    plt.savefig(os.path.join(args[0], 'dierickx-analyticee.png'),
                format='png',
                dpi=200)
    arr = np.recarray(len(e_ana),
                      dtype=[('analytic_e', float),
                             ('integrated_e', float)])
    arr['analytic_e'] = e_ana
    arr['integrated_e'] = e_int
    with open(os.path.join(args[0], 'eccentricities.dat'), 'wb') as file:
        pickle.dump(arr, file)
Exemple #60
0
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import math
from astropy.io import ascii

from matplotlib import colors as mcolors
from matplotlib import gridspec

##
##  V H z Q    data
##
path = '/cos_pc19a_npr/data/highest_z_QSOs/'
#filename ='THE_TABLE_v0pnt96.dat'
filename = 'THE_TABLE_v0pnt97x_PS1_ULAS_VHS_photom_v2.dat'
table = path + filename
VHzQ = ascii.read(table)
#VHzQ = VHzQ[np.where( (VHzQ['redshift'] >= 5.7) & (VHzQ['redshift'] <= 6.7))]

redshift = VHzQ['redshift']
g_PS1 = VHzQ['gMag']
r_PS1 = VHzQ['rMag']
i_PS1 = VHzQ['iMag']
z_PS1 = VHzQ['zMag']
y_PS1 = VHzQ['yMag']
Ymag = VHzQ['Yapermag']
Jmag = VHzQ['Jmag']
Hmag = VHzQ['Hmag']
Kmag = VHzQ['Kmag']
W1mag = VHzQ['W1mag']
W2mag = VHzQ['W2mag']
W3mag = VHzQ['W3mag']