Example #1
def galextinction(file, spec):  # Take the E(B-V) of the galaxy from IrsaDust
    from astroquery.irsa_dust import IrsaDust  # import needed by this snippet

    name = file  # IRSA-resolvable object name
    table = IrsaDust.get_query_table(name, section='ebv')
    ebv = table['ext SFD mean'][0]

    spec.deredden(ebv=ebv)  # Deredden in place
    return spec
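
A minimal way to exercise galextinction: the snippet never shows where spec comes from, so the DemoSpectrum class below is a purely illustrative stand-in for any spectrum object that exposes a deredden(ebv=...) method.

class DemoSpectrum:
    """Illustrative stand-in for the caller's spectrum object."""
    def deredden(self, ebv):
        print("would deredden in place with E(B-V) = %.4f" % ebv)

spec = galextinction('m31', DemoSpectrum())  # IRSA resolves the object name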
Example #2
def correct_for_dust(wavelength, ra, dec):
    """Query IRSA dust map for E(B-V) value and returns reddening array
    ----------
    wavelength : numpy array-like
        Wavelength values for which to return reddening
    ra : float
        Right Ascension in degrees
    dec : float
        Declination in degrees

    Returns
    -------
    reddening : numpy array
        Multiplicative correction factor at each input wavelength.
    ebv : float

    Notes
    -----
    For info on the dust maps, see http://irsa.ipac.caltech.edu/applications/DUST/
    """

    from astroquery.irsa_dust import IrsaDust
    import astropy.coordinates as coord
    import astropy.units as u
    import numpy as np
    C = coord.SkyCoord(ra * u.deg, dec * u.deg, frame='fk5')
    dust_image = IrsaDust.get_images(C, radius=2 * u.deg, image_type='ebv', timeout=60)[0]
    ebv = np.mean(dust_image[0].data[40:42, 40:42])  # E(B-V) near the map center
    r_v = 3.1
    av = r_v * ebv
    from specutils.extinction import reddening  # removed in recent specutils releases
    return reddening(wavelength * u.angstrom, av, r_v=r_v, model='ccm89'), ebv
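
specutils.extinction has been removed from recent specutils releases, so the import above fails on modern installs. A sketch of the same CCM89 reddening factor using the maintained dust_extinction package (treat the package choice as a suggested replacement, not part of the original snippet):

import astropy.units as u
from dust_extinction.parameter_averages import CCM89

def reddening_ccm89(wavelength, av, r_v=3.1):
    """Multiplicative flux correction; CCM89 is valid for roughly 1000-33000 Angstrom."""
    ext = CCM89(Rv=r_v)
    return 1.0 / ext.extinguish(wavelength * u.angstrom, Av=av)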
Example #3
def correct_for_dust(wavelength, ra, dec):
    """Query IRSA dust map for E(B-V) value and returns reddening array
    ----------
    wavelength : numpy array-like
        Wavelength values for which to return reddening
    ra : float
        Right Ascension in degrees
    dec : float
        Declination in degrees

    Returns
    -------
    reddening : numpy array
        Multiplicative correction factor at each input wavelength.
    ebv : float

    Notes
    -----
    For info on the dust maps, see http://irsa.ipac.caltech.edu/applications/DUST/
    """

    from astroquery.irsa_dust import IrsaDust
    import astropy.coordinates as coord
    import astropy.units as u
    C = coord.SkyCoord(ra * u.deg, dec * u.deg, frame='fk5')
    # dust_image = IrsaDust.get_images(C, radius=2 *u.deg, image_type='ebv', timeout=60)[0]
    # ebv = np.mean(dust_image[0].data[40:42, 40:42])
    dust_table = IrsaDust.get_query_table(C, section='ebv', timeout=60)
    ebv = dust_table['ext SandF mean'][0]  # the 'ref' column holds a reference, not the value

    from dust_extinction.parameter_averages import F04
    # initialize the model
    ext = F04(Rv=3.1)
    reddening = 1 / ext.extinguish(wavelength * u.angstrom, Ebv=ebv)
    return reddening, ebv
Example #4
def grabImage(ra, dec):
    # ra/dec in degrees; image_data is assumed to be a module-level list
    imagelist = IrsaDust.get_image_list(SkyCoord(ra, dec, unit='deg').fk5,
                                        image_type="100um",
                                        radius=2 * u.degree)
    image_file = download_file(imagelist[0], cache=True)
    image_data.append(fits.getdata(image_file,
                                   ext=0))  # gets image from IRSA database
Example #5
def radec_extinction(ra, dec, a_lambda=1.0):
    """Estimate the Galactic extinction for HSC filters.

    -----------
    Parameters:
        (ra, dec): (float, float)
            The input coordinates can be arrays

        a_lambda : optional, default=1.0
            Convert the e_bv value into extinction in magnitude unit.

    """
    coords = SkyCoord(ra, dec, frame='icrs', unit='deg')

    try:
        # mwdust by Jo Bovy
        import mwdust
        sfd = mwdust.SFD(sf10=True)
        ebv = sfd(coords.galactic.l.deg, coords.galactic.b.deg, 0)
    except ImportError:
        try:
            # Try querying the IRSA dust map instead
            from astroquery.irsa_dust import IrsaDust
            extinction_tab = IrsaDust.get_extinction_table(coords)
            ebv = (extinction_tab['A_SFD'] / extinction_tab['A_over_E_B_V_SFD'])[1]
        except ImportError:
            raise Exception("# Need mwdust by Jo Bovy or Astroquery")

    return a_lambda * ebv
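
Example call with an illustrative coefficient; a_lambda is A_band/E(B-V) for the filter of interest, and the 3.24 below is a placeholder rather than an HSC-official value.

a_g = radec_extinction(150.1, 2.2, a_lambda=3.24)  # coefficient is illustrative only
print(a_g)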
Example #6
def correct_for_dust(wavelength, ra, dec):
    """Query IRSA dust map for E(B-V) value and returns reddening array
    ----------
    wavelength : numpy array-like
        Wavelength values for which to return reddening
    ra : float
        Right Ascension in degrees
    dec : float
        Declination in degrees

    Returns
    -------
    reddening : numpy array
        Multiplicative correction factor at each input wavelength.
    ebv : float

    Notes
    -----
    For info on the dust maps, see http://irsa.ipac.caltech.edu/applications/DUST/
    """

    from astroquery.irsa_dust import IrsaDust
    import astropy.coordinates as coord
    import astropy.units as u
    import numpy as np
    C = coord.SkyCoord(ra * u.deg, dec * u.deg, frame='fk5')
    dust_image = IrsaDust.get_images(C, radius=2 * u.deg, image_type='ebv')[0]
    ebv = np.mean(dust_image[0].data[40:42, 40:42])
    r_v = 3.1
    av = r_v * ebv
    from specutils.extinction import reddening  # removed in recent specutils releases
    return reddening(wavelength * u.angstrom, av, r_v=r_v, model='ccm89'), ebv
Example #7
def query_bsnip():
	# Creates extinction.dat from bsnip data
	# list of files
	bsnip = glob.glob('../../data/bsnip/*.flm')
	bsnip_sn = []
	for i in range(len(bsnip)):
		bsnip[i] = bsnip[i].split('-')[0]
		bsnip[i] = bsnip[i].split('/')
		bsnip[i] = bsnip[i][len(bsnip[i]) - 1]
		if i > 0:  # removes redundant sn names
			if bsnip[i - 1] != bsnip[i]:
				bsnip_sn.append(bsnip[i])
	b = []
	v = []
	for i in range(len(bsnip_sn)):
		print("looking at", bsnip_sn[i])
		# certain cases for specific sn names
		if bsnip_sn[i] == 'sn2007s1':
			print("WARNING")
			del bsnip_sn[i:]
			break
		ext = IrsaDust.get_extinction_table(bsnip_sn[i])
		print(ext)
		print(ext[1][0], ext[1][3])
		print(ext[2][0], ext[2][3])
		b.append(ext[1][3])
		v.append(ext[2][3])
	# makes table in format 'sn','B','V'
	param = Table([bsnip_sn, b, v])
	ascii.write(param, 'bsnip_extinc.dat')
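
The adjacent-comparison loop above never appends the first name and assumes the glob results arrive sorted. An order-preserving dedup sketch that avoids both caveats (dict.fromkeys keeps first occurrences on Python 3.7+):

import glob

def unique_sn_names(pattern='../../data/bsnip/*.flm'):
    names = [p.split('-')[0].split('/')[-1] for p in sorted(glob.glob(pattern))]
    return list(dict.fromkeys(names))  # order-preserving deduplication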
Example #8
def bsnip_edits():
	bsnip = glob.glob('../../data/bsnip/*.flm')
	bsnip_sn = []
	b = []
	v = []
	for i in range(len(bsnip)):
		bsnip[i] = bsnip[i].split('-')[0]
		bsnip[i] = bsnip[i].split('/')
		bsnip[i] = bsnip[i][len(bsnip[i]) - 1]
		if i > 0:  # removes redundant sn names
			if bsnip[i - 1] != bsnip[i]:
				bsnip_sn.append(bsnip[i])
	cut_index = 0
	flag = False
	for i in range(len(bsnip_sn)):
		if not flag:
			print("passing", bsnip_sn[i])
		# certain cases for specific sn names
		if bsnip_sn[i] == 'sn2007s1':  # want to start at first changed sn
			print("start with", len(bsnip_sn), "sn")
			print("START at index:", i)
			cut_index = i
			flag = True  # start here
		if flag:
			print("looking at", bsnip_sn[i])
			if bsnip_sn[i] == "sn2007s1":
				bsnip_sn[i] = "SNF20071021-000"
				print("changed to", bsnip_sn[i])
			if bsnip_sn[i] == "sn2008s1":
				bsnip_sn[i] = "SNF20080514-002"
				print("changed to", bsnip_sn[i])
			if bsnip_sn[i] == "sn2008r3":
				bsnip_sn[i] = "sn1989a"  # doesn't work
				print("changed to", bsnip_sn[i])
			if bsnip_sn[i] == "sn2008s3":
				bsnip_sn[i] = "SNF20080825-006"
				print("changed to", bsnip_sn[i])
			if bsnip_sn[i] == "sn2008s4":
				bsnip_sn[i] = "sn1989a"  # can't find correct name
				print("changed to", bsnip_sn[i])
			if bsnip_sn[i] == "sn2008s5":
				bsnip_sn[i] = "SNF20080909-030"
				print("changed to", bsnip_sn[i])
			if bsnip_sn[i] == "sn2008s8":
				bsnip_sn[i] = "sn1989a"  # look for correct names
				print("changed to", bsnip_sn[i])

			ext = IrsaDust.get_extinction_table(bsnip_sn[i])
			print(ext)
			print(ext[1][0], ext[1][3])
			print(ext[2][0], ext[2][3])
			b.append(ext[1][3])
			v.append(ext[2][3])

	print("stopped successfully?...cutting table")
	del bsnip_sn[:cut_index]
	param = Table([bsnip_sn, b, v])
	print(param)
	ascii.write(param, 'bsnip_extinc_added.dat')
Example #9
def calc_ebv(ra, dec):
    # determine E(B-V) from the IRSA dust maps
    c = SkyCoord(ra=ra * u.degree, dec=dec * u.degree, frame='icrs')
    table = IrsaDust.get_query_table(c, section='ebv')
    ebv = table['ext SFD mean'][0]
    # Offline alternative: read the SFD_dust_4096_ngp/sgp.fits maps directly,
    # convert (ra, dec) to Galactic (l, b), pick the hemisphere by the sign
    # of b, and sample the map pixel via wcs.all_world2pix.
    return ebv
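
For arrays of positions, one IRSA query per coordinate is the simplest extension of calc_ebv; a sketch (slow for large catalogs, where reading the SFD maps locally scales better):

import numpy as np

def calc_ebv_many(ras, decs):
    """One network query per position; fine for short lists."""
    return np.array([calc_ebv(r, d) for r, d in zip(ras, decs)])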
Example #10
    def __init__(self, coordinate_or_galaxy):

        """
        This function ...
        :param coordinate_or_galaxy:
        :return:
        """

        # Check whether the user/extinction directory exists
        if not fs.is_directory(extinction_path): fs.create_directory(extinction_path)

        # Get query
        if isinstance(coordinate_or_galaxy, str): query = coordinate_or_galaxy
        else: query = coordinate_or_galaxy.to_astropy()

        # Determine the path to the local extinction table
        path = fs.join(extinction_path, str(query))

        # Check if the local file exists
        if not fs.is_file(path):

            # Get the extinction table from IRSA
            self.table = IrsaDust.get_extinction_table(query)

            # Save the table
            tables.write(self.table, path)

        # Load the table
        else: self.table = tables.from_file(path)
    def correct_extinction(self, val, filtr, EBminV=None, mag=False):
        '''Function to correct for Galactic extinction using values from IRSA


        Note:
            Central wavelengths for UVOT filters are taken from `Poole et al. (2008)`_.
            :math:`R_{\lambda}` values are derived using the `York Extinction Solver`_.


        Args:
            val (float): flux or mag requiring extinction correction (flux is assumed, unless ``mag = True``)
            filtr (str): UVOT filter name (vv, uu, bb, w1, m2, w2)
            EBminV (float, optional): E(B-V) value to use; if None, it is queried from IRSA at ``self.source_coords`` (`astropy.coordinates.SkyCoord`_)
            mag (bool): set True if ``val`` is a magnitude rather than a flux

        Returns:
            float: Extinction-corrected flux (erg/s/cm2) or magnitude (mag)

        .. _astropy.coordinates.SkyCoord:
            http://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html#astropy.coordinates.SkyCoord
        .. _Poole et al. (2008):
            http://adsabs.harvard.edu/abs/2008MNRAS.383..627P
        .. _York Extinction Solver:
            http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/community/YorkExtinctionSolver/coefficients.cgi
        '''

        central_wav = {
            'uu': 3465.,
            'w1': 2600.,
            'm2': 2246.,
            'w2': 1928.,
            'bb': 4392.,
            'vv': 5468.
        }
        R_lambda = {
            'uu': 4.89172,
            'w1': 6.55663,
            'm2': 9.15389,
            'w2': 8.10997,
            'bb': 4.00555,
            'vv': 2.99692
        }

        #query for the E(B-V) value, unless user specifies one
        if EBminV is None:
            from astroquery.irsa_dust import IrsaDust
            extTable = IrsaDust.get_extinction_table(self.source_coords)
            EBminV = np.median(extTable['A_SandF'] /
                               extTable['A_over_E_B_V_SandF'])

        #calculate extinction magnitude
        A_lambda = R_lambda[filtr] * EBminV

        if mag:
            return val - A_lambda
        else:
            return val * central_wav[filtr] * 10**(A_lambda / 2.5)
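
A worked check of the magnitude branch, with an illustrative E(B-V) (no IRSA query is made when EBminV is supplied):

R_w1 = 6.55663            # R_lambda['w1'] from the table above
ebv = 0.05                # illustrative E(B-V) in magnitudes
a_w1 = R_w1 * ebv         # extinction A_w1, about 0.33 mag
corrected = 18.00 - a_w1  # an 18.00 mag source becomes about 17.67 mag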
Example #12
def get_extinction(coords, filters=['PS1_g', 'PS1_r', 'PS1_i']):

    global extdata
    filters = Table([filters], names=['filter'])
    kk = join(extdata, filters, keys=['filter'])
    AEBV = np.array(kk['AEBV2'])

    t = IrsaDust.get_query_table(coords, section='ebv')
    ebv = np.array(t['ext SFD mean'])  # the 'ref' column holds a reference, not the value

    return ebv * AEBV
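
get_extinction relies on a module-level extdata table with 'filter' and 'AEBV2' columns; a minimal stand-in showing the expected shape (the coefficients below are approximate Schlafly & Finkbeiner 2011 PS1 values, quoted from memory -- verify before use):

from astropy.table import Table

extdata = Table({'filter': ['PS1_g', 'PS1_r', 'PS1_i'],
                 'AEBV2': [3.17, 2.27, 1.68]})  # A_lambda / E(B-V) per filter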
Example #13
    def atable(self):
        """ Return the extinction in mag in starndard filters """

        #query IRAS service
        #http://irsa.ipac.caltech.edu/applications/DUST/

        if (self.coord == -1):
            print("Error: coordinates must be set")
            exit()

        from astroquery.irsa_dust import IrsaDust
        tab = IrsaDust.get_extinction_table(self.coord)

        return tab
Example #14
    def atable(self):
        
        """ Return the extinction in mag in starndard filters """

        #query IRAS service 
        #http://irsa.ipac.caltech.edu/applications/DUST/
        
        if (self.coord == -1):
            print "Error: coordinates must be set"
            exit()

        from astroquery.irsa_dust import IrsaDust
        tab = IrsaDust.get_extinction_table(self.coord)
    
        return tab
Example #15
def getAVbest(inputcoordinates):
    '''
    Coordinates are input as a single string. Output is the recommended Av value for MW reddening, error, and reference
    '''

    #inputcoordinates = sys.argv[1]
    testCoords = SkyCoord(inputcoordinates, frame='fk5')
    #print('\n-----\nReading input files...')
    inFile = 'Brown_Walker_table_1.dat'
    inTable = pd.read_csv(inFile, header=None, delimiter=' ')
    ra = Angle(inTable.iloc[:, 1])
    dec = Angle(inTable.iloc[:, 2])
    sourceCoords = SkyCoord(ra, dec, frame='fk5')

    #print('Calculating separation from table coordinates')
    separations = testCoords.separation(sourceCoords).arcminute
    # compare to the distances in the table
    within = np.less(separations, inTable.iloc[:, 3])

    # Are any of the input coordinates within the tabulated distance
    # of the coordinates in the table?
    correctedAV = np.where(within, inTable.iloc[:, 4],
                           None)  #get calculated value
    fix = any(within)
    #print('fix?',fix)

    if fix:
        AV = next((item for item in correctedAV if item is not None), None)
        correctedAVerr = np.where(within, inTable.iloc[:, 5],
                                  None)  #get calculated val
        newAVerr = next((item for item in correctedAVerr if item is not None),
                        None)
        AVerr = math.sqrt(float(newAVerr)**2 + (float(AV) * 0.1)**2)
        sources = np.where(within, inTable.iloc[:, 6], None)
        source = next(
            (item for item in sources if item is not None), None) + ",S_F_2011"

    if not fix:
        AVtable = IrsaDust.get_extinction_table(testCoords,
                                                show_progress=False)
        AV = AVtable['A_SandF'][2]
        AVerr = AV * 0.1
        source = 'S_F_2011'

    #print(AV, AVerr, source)
    return (AV, AVerr, source)
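
Example call, assuming Brown_Walker_table_1.dat sits in the working directory and SkyCoord, Angle, pandas (pd), numpy (np), math, and IrsaDust are already imported:

av, av_err, source = getAVbest('12h34m56.7s +01d23m45.6s')
print(av, av_err, source)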
Example #16
File: tde.py Project: muryelgp/TDEpy
    def get_ebv(self):
        """
        This function return the E(B-V) Galactic extinction in the line of sight at position of the source.


        Returns
        ----------------
        ebv : float
            E(B-V) Galactic extinction in the line of sight at ra and dec given.
        """
        coo = SkyCoord(ra=float(self.ra),
                       dec=float(self.dec),
                       unit=units.deg,
                       frame=FK5)
        table = IrsaDust.get_query_table(coo, section='ebv')
        ebv = table['ext SandF mean'][0]
        return ebv
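
The same lookup as a stand-alone function, for callers without the class; a sketch assuming only astropy and astroquery:

from astropy.coordinates import SkyCoord, FK5
from astropy import units
from astroquery.irsa_dust import IrsaDust

def get_ebv_at(ra, dec):
    """E(B-V) along the line of sight toward (ra, dec) in degrees."""
    coo = SkyCoord(ra=float(ra), dec=float(dec), unit=units.deg, frame=FK5)
    return IrsaDust.get_query_table(coo, section='ebv')['ext SandF mean'][0]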
Example #17
def tableFill(dam, ra, dec):
    curVal = [None] * 5  #n = 0, e = 1, s = 2, w = 3
    coord = [None] * 5  #n = 0, e = 1, s = 2, w = 3
    #get values for each arcminute
    for j in range(dam, dam + 1):  #change 1st dam to 0 for concurrent values
        coord = fourCoord(j, ra, dec, coord)
        for i in range(0, 5):
            try:
                C = coordinates.SkyCoord(coord[i])
                table = IrsaDust.get_extinction_table(C.fk5,
                                                      show_progress=False)
                curVal[i] = (table['A_SandF'][2])
            except Exception:
                curVal = [None] * 5
                break
        # output1.write('\n')
    return curVal
Example #18
def tableFill(distance, ra, dec, appender, nme):
    """
    IN THE PROCESS OF CLEANING THIS UP
    Things I think I don't need:
        Table
    """
    t = Table(None)
    Am = Column(name='Arcminute')
    North = Column(name='North')
    East = Column(name='East')
    South = Column(name='South')
    West = Column(name='West')
    t.add_columns([Am, North, East, South, West])
    tA_v = []
    curVal = [None] * 4  #n = 0, e = 1, s = 2, w = 3
    coord = [None] * 4  #n = 0, e = 1, s = 2, w = 3
    #get values for each arcminute
    for j in range(0, distance + 1):
        coord = fourCoord(j, ra, dec, coord)
        t.add_row()
        t[j][0] = j
        for i in range(0, 4):
            C = coordinates.SkyCoord(coord[i])
            table = IrsaDust.get_extinction_table(C.fk5, show_progress=False)
            curVal[i] = table['A_SandF'][2]
            t[j][i + 1] = curVal[i]
        tA_v.append(curVal[:])  # append a copy so later rows don't overwrite it

    t.add_row()
    for i in range(0, 5):  # this adds a blank line to the table to separate queries
        t[j + 1][i] = None
    n = [nme]
    namesTable = Table([n], names=['n'])
    final_name = namesTable.to_pandas()
    final_vals = t.to_pandas()
    with open('A_v Values.csv', appender) as f:
        final_name.to_csv(f, header=False, index=False)
    appender = 'a'
    with open('A_v Values.csv', appender) as f:
        final_vals.to_csv(f, header=True, index=False, sep=',')
    return (tA_v)  #gets the data from IRSA database and stores A_v in array
Example #19
    def getebv(self):
        """
        Given an RA/DEC position return EBV in tables
        Written by MF in Durham, Oct 2014. 
        
        """
        #query IRSA service
        #http://irsa.ipac.caltech.edu/applications/DUST/
        #parse table to get the mean/std of
        #SandF = Schlafly & Finkbeiner 2011 (ApJ 737, 103) [ext SandF mean,ext SandF std]
        #SFD   = Schlegel et al. 1998 (ApJ 500, 525) [ext SFD mean,ext SFD std]

        if (self.coord == -1):
            print("Error: coordinates must be set")
            exit()

        from astroquery.irsa_dust import IrsaDust
        ebv = IrsaDust.get_query_table(self.coord, section='ebv')
        self.ebv = ebv

        return ebv
Example #20
    def __init__(self, coordinate_or_galaxy):
        """
        This function ...
        :param coordinate_or_galaxy:
        :return:
        """

        # Check whether the user/attenuation directory exists
        if not fs.is_directory(attenuation_path):
            fs.create_directory(attenuation_path)

        # Get query
        if types.is_string_type(coordinate_or_galaxy):
            query = coordinate_or_galaxy
        else:
            query = coordinate_or_galaxy.to_astropy()

        # Determine the path to the local extinction table
        path = fs.join(attenuation_path, str(query))

        # Check if the local file exists
        if not fs.is_file(path):

            # Get the extinction table from IRSA
            self.table = IrsaDust.get_extinction_table(query)

            # Save the table
            tables.write(self.table, path)

        # Load the table
        else:
            self.table = tables.from_file(path)

        # Check whether the RFILTER table is present, load
        if fs.is_file(rfilter_table_path):
            self.rfilter = RFilterTable.from_file(rfilter_table_path)
        else:  # or create new
            self.rfilter = RFilterTable()
            self.rfilter.path = rfilter_table_path
            self.rfilter.save()
Example #21
def query_cfa():
	# gets cfa folder pathnames
	cfa = glob.glob('../../data/cfa/*')
	# Truncates cfa pathname into needed format and removes cfa.dat files
	del cfa[0]
	del cfa[0]
	for i in range(len(cfa)):
		cfa[i] = cfa[i][15:]
		print(cfa[i])
	b = []
	v = []
	for i in range(len(cfa)):
		print("looking at", cfa[i])
		ext = IrsaDust.get_extinction_table(cfa[i])
		print(ext)
		print(ext[1][0], ext[1][3])
		print(ext[2][0], ext[2][3])
		b.append(ext[1][3])
		v.append(ext[2][3])
	# makes table in format 'sn','B','V'
	param = Table([cfa, b, v])
	ascii.write(param, 'extinctioncfa.dat')
Example #22
    def getebv(self):
        
        """
        Given an RA/DEC position return EBV in tables
        Written by MF in Durham, Oct 2014. 
        
        """
        #query IRAS service 
        #http://irsa.ipac.caltech.edu/applications/DUST/
        #parse table to get the mean/std of 
        #SandF = Schlafly & Finkbeiner 2011 (ApJ 737, 103) [ext SandF mean,ext SandF std]
        #SFD   = Schlegel et al. 1998 (ApJ 500, 525) [ext SFD mean,ext SFD std]

        if (self.coord == -1):
            print "Error: coordinates must be set"
            exit()


        from astroquery.irsa_dust import IrsaDust
        ebv = IrsaDust.get_query_table(self.coord,section='ebv')
        self.ebv=ebv

        return ebv
Example #23
    def __init__(self, tempParentDir=None, debug=True, debugPlot=False, tempSubDir="dust"):
        self.tempParentDir = tempParentDir
        self.tempSubDir = tempSubDir
        self.debug = debug
        self.debugPlot = debugPlot
        self.tempDir = None
        self.outDir = "../out" + os.sep + tempSubDir
        outPath = self.outDir + os.sep + "downloaded.pkl"
        
        self._setupTempDir()
        
        if os.path.exists(outPath):
            with open(outPath, 'rb') as f:
                self.image_list = pickle.load(f)
        else:
            with open(outPath, 'wb') as outfile:
                self.image_list = IrsaDust.get_images("maffei1")
                pickle.dump(self.image_list, outfile)

        self.image_types = [i[0].header['BUNIT'] for i in self.image_list]

        self.ebvIndex = self.image_types.index("mag E(B-V)")
        # Schlafly, E.F. & Finkbeiner, D.P.  2011, ApJ 737, 103 (S and F).
        self.extinctions = { "I": 1.698, "R": 2.285, "Z": 1.263 }
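
The download-once/pickle-cache pattern in __init__ above, factored into a reusable helper; a sketch assuming network access on the first call and that the returned HDU lists pickle cleanly (as the original code already assumes):

import os
import pickle
from astroquery.irsa_dust import IrsaDust

def cached_dust_images(target, cache_path):
    """Return IRSA dust images for target, caching the result on disk."""
    if os.path.exists(cache_path):
        with open(cache_path, 'rb') as f:
            return pickle.load(f)
    images = IrsaDust.get_images(target)
    with open(cache_path, 'wb') as f:
        pickle.dump(images, f)
    return images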
Example #24
def main():
    db = sqlite3.connect('test_schedule_v8_msip.db')
    table = pd.read_sql_query("SELECT * from SUMMARY", db)
    ind = table['subprogram'] == 'all_sky'
    msip = table[ind]

    release_date = '20180622'
    survey = 'ZTF_MSIP'
    filters = ''.join(np.unique(msip['filter']))
    user = '******'
    host = 'grimnir.stsci.edu'
    comment = 'Based on ZTF observing log DB from Eric Bellm, Rahul Biswas on {}'.format(
        release_date)
    pixsize = 1.
    fields = np.unique(msip['fieldID'])
    nlibid = len(fields)

    outlines = []
    outlines.append('SURVEY: {}'.format(survey))
    outlines.append('FILTERS: {}'.format(filters))
    outlines.append('TELESCOPE: ZTF')
    outlines.append('USER: {}'.format(user))
    outlines.append('HOST: {}'.format(host))
    outlines.append('SKYSIG_UNIT: ADU_PER_SQARCSEC')
    outlines.append('PIXSIZE: {:0.1f}'.format(pixsize))
    outlines.append('NLIBID: {}'.format(nlibid))
    outlines.append('COMMENT: {}'.format(comment))
    outlines.append('BEGIN LIBGEN')

    for field in fields:
        outlines.append('# --------------------------------------------')
        # Select from `table`, not `msip`, in case some of the other programs
        # observe the same field. This may not be useful since we don't have
        # access to non-MSIP data, but in principle these observations have
        # been taken and could be used to classify the data.

        outlines.append('LIBID: {}'.format(field))
        indf = (table['fieldID'] == field)

        # all the positions appear to be identical, so there's no way to
        # account for dithers or overlaps
        ra = np.unique(table[indf]['fieldRA'])[0]
        dec = np.unique(table[indf]['fieldDec'])[0]

        coo = coord.SkyCoord(ra * u.deg, dec * u.deg, frame='icrs')
        dust = IrsaDust.get_query_table(coo, section='ebv')
        mwebv = dust['ext SandF mean'][0]

        nobs = len(table[indf])
        outlines.append(
            'RA: {}    DEC: {}    NOBS: {}    PIXSIZE: {}    MWEBV: {}    FIELD: {}'
            .format(ra, dec, nobs, pixsize, mwebv, field))
        outlines.append(
            '#                           CCD  CCD         PSF1 PSF2 PSF2/1')
        outlines.append(
            '#     MJD      ID*NEXPOSE  FLT GAIN NOISE SKYSIG (pixels)  RATIO  ZPTAVG ZPTERR  MAG'
        )

        entries = at.Table.from_pandas(table[indf])

        for entry in entries:
            # get some quantities
            flt = entry['filter']
            skymag = entry['filtSkyBright']
            depth = entry['fiveSigmaDepth']
            snr = 5.
            fwhm = entry['FWHMeff']

            term1 = 2.0 * depth - skymag
            term2 = -(depth - skymag)

            # convert FWHM from arcsec to sigma_gaussian in pixels
            sigma_pixel = fwhm / 2.35 / pixsize
            pixel_area = (1.51 * fwhm)**2  # noise-equivalent area in pixels
            arg = pixel_area * snr * snr

            # Background dominated limit assuming counts with system transmission only
            # is approximately equal to counts with total transmission
            zpt_approx = term1 + 2.5 * np.log10(arg)

            tmp = 10.**(-0.4 * term2)
            zpt_cor = 2.5 * np.log10(1. + 1. / (pixel_area * tmp))
            simlib_zptavg = zpt_approx + zpt_cor

            npix_asec = 1. / pixsize**2.
            skysig = np.sqrt(
                (1.0 / npix_asec) * 10.**(-0.4 * (skymag - simlib_zptavg)))

            lst = [
                'S:',
                "{0:5.4f}".format(entry['expMJD']),
                "{0:10d}*2".format(entry['obsHistID']),
                entry['filter'],
                "{0:5.2f}".format(1.),  # CCD Gain
                "{0:5.2f}".format(0.25),  # CCD Noise
                "{0:6.2f}".format(skysig),  # SKYSIG
                "{0:4.2f}".format(sigma_pixel),  # PSF1
                "{0:4.2f}".format(0.),  # PSF2
                "{0:4.3f}".format(0.),  # PSFRatio
                "{0:6.2f}".format(simlib_zptavg),  # ZPTAVG
                "{0:6.3f}".format(0.005),  # ZPTNoise
                "{0:+7.3f}".format(-99.)
            ]  # MAG
            out = ' '.join(lst)
            outlines.append(out)
        outlines.append('END_LIBID: {}'.format(field))
    outlines = '\n'.join(outlines)

    with open('ztf_msip_simlib_{}.dat'.format(release_date), 'w') as f:
        f.write(outlines)
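
A worked check of the SIMLIB sky-noise arithmetic above, with round illustrative numbers (5-sigma depth 21.0 mag, sky 20.0 mag/arcsec^2, FWHM 2.0 arcsec, SNR 5, 1 arcsec pixels):

import numpy as np

depth, skymag, fwhm, snr, pixsize = 21.0, 20.0, 2.0, 5.0, 1.0
pixel_area = (1.51 * fwhm) ** 2                      # noise-equivalent area, pixels
zpt_approx = (2.0 * depth - skymag) + 2.5 * np.log10(pixel_area * snr * snr)
zpt_cor = 2.5 * np.log10(1.0 + 1.0 / (pixel_area * 10 ** (0.4 * (depth - skymag))))
simlib_zptavg = zpt_approx + zpt_cor                 # about 27.9
skysig = np.sqrt(pixsize ** 2 * 10 ** (-0.4 * (skymag - simlib_zptavg)))
print(simlib_zptavg, skysig)                         # skysig about 39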
Example #25
cspmini = []  # a shorter list

for i in range(len(csp)):
    csp[i] = 'sn20' + csp[i][20:24]
    # print(csp[i])

for sn in csp:  # delete duplicate items
    if sn not in cspmini:
        cspmini.append(sn)

for i in range(len(cspmini)):  # For single-letter SNs
    if cspmini[i].endswith('_'):
        cspmini[i] = cspmini[i][:-1]

print(cspmini)

b = []
v = []
for i in range(len(cspmini)):
    print("looking at SN", cspmini[i])
    ext = IrsaDust.get_extinction_table(cspmini[i])
    print(ext)
    print(ext[1][0], ext[1][3])
    print(ext[2][0], ext[2][3])
    b.append(ext[1][3])
    v.append(ext[2][3])

param = Table([cspmini, b, v], names=('sn', 'B', 'V'))
ascii.write(param, 'extinctioncsp.dat')
Example #26
    filts.append(eff)
    ax1.fill(wlr, eff, label=f.split('.')[0], edgecolor="none", color=filtercolor[i])
ax1.axhline(spec, color="black", lw=3, alpha=.5)
# ax1.set_xlabel(r"$\lambda$ in $\AA$")
ax1.set_ylabel("Throughput")
ax1.axes.get_xaxis().set_visible(False)
wl = np.sort(wl)

corrections = np.empty((len(filters), len(coords)))
mags_notred = np.empty(len(filters))
mags_red = np.empty((len(filters), len(coords)))
alambdas = [[[] for _ in coords] for _ in filts]
color = cm.viridis(np.linspace(0, 1, len(coords)))
for i, c in enumerate(coords):
    C = coord.SkyCoord(str(c[0]) + " " + str(c[1]), unit="deg", frame="fk5")
    table = IrsaDust.get_query_table(C, radius=None)
    eb_v = table["ext SandF mean"]
    al_plot = f99(wl, eb_v.data[0] * 3.1)  # f99 (defined elsewhere) presumably evaluates the Fitzpatrick (1999) curve at A_V = 3.1 E(B-V)
    for j, f in enumerate(filts):
        alambdas[j][i] = f99(wls[j], eb_v.data[0] * 3.1)
    ax2.plot(wl, al_plot, label=str(c[0])[:6] + " " + str(c[1])[:4], color=color[i])
ax2.set_xlabel(r"$\lambda$ in $\rm \AA$")
ax2.set_ylabel("Extinction in magnitudes")
ax2.set_ylim([0,0.07])
alambdas=np.array(alambdas)

for j, f in enumerate(filts):
    diffs = np.gradient(wls[j])
    flux = sum(wls[j] * spec * f * diffs)  # integration
    norm = sum(f * diffs / wls[j])  # normalisation following GALEXEV docs
Example #27
     pflag = 'GAIADR2'
 elif type(result['PLX_VALUE'][indr]) != np.ma.core.MaskedConstant:
     p = round(float(result['PLX_VALUE'][indr]), 2)
     if type(result['PLX_VALUE'][indr]) != np.ma.core.MaskedConstant:
         perr = round(float(result['PLX_ERROR'][indr]), 2)
     else:
         perr = empty
     pflag = 'Simbad'
 else:
     try:
         pos = coord.SkyCoord(ra=ra, dec=dec,
                              unit=(u.hourangle,u.deg),
                              frame='icrs')
         #AvSF = Schlafly & Finkbeiner 2011 (ApJ 737, 103)
         tableAv = IrsaDust.get_query_table(pos,
                                            radius='02d',
                                            section='ebv',
                                            timeout=60)
         Av = tableAv['ext SandF mean'].data[0]
         Averr = tableAv['ext SandF std'].data[0]
     except Exception:
         Av = 0
         Averr = 0
     try:    
         p, perr = [round(x, 2) for x in parallax(Teff,
                                                  Tefferr,
                                                  float(logg),
                                                  float(loggerr),
                                                  V, Verr, M, Merr, Av, Averr)]
         pflag = 'Spec'
     except Exception:
         p = 'NULL'
Example #28
def main():
    """Main script to prepare x-shooter observations for combination"""
    from astropy.io import fits
    import glob
    import matplotlib.pyplot as pl
    from methods import latexify
    latexify()
    import numpy as np
    from xshoo.combine import inter_arm_cut

    #Files
    obj_name = 'SDSS1437-0147'
    root_dir = '/Users/jselsing/Work/X-Shooter/CompositeRedQuasar/processed_data/'+obj_name
    object_files = glob.glob(root_dir+'/OBJECT/*IDP*.fits')
    transmission_files = glob.glob(root_dir+'/transmission*.fits')
    arms = ['UVB', 'VIS', 'NIR']
    wl_out = []
    flux_out = []
    flux_uncorr_out = []
    err_out = []
    start = []
    end = []

    for n in arms:
        print('In arm: '+n)


        #Read in object spectrum
        obser = [k for k in object_files if n in k]
        ob = fits.open(obser[0])
        wl = 10.0*ob[1].data.field('WAVE')[0]
        flux = ob[1].data.field('FLUX')[0]
        err = ob[1].data.field('ERR')[0]

        wl_tmp, flux_uncorr, err_tmp, start_tmp, end_tmp = inter_arm_cut(wl, flux, err, n, start, end)
        if n== 'VIS' or n== 'NIR':
            transmission = fits.open([k for k in transmission_files if n in k][0])[0].data
            for j, k in enumerate(transmission):
                if k <= 1e-10:
                    transmission[j] = 1
            flux /= transmission
            err /= transmission
        wl, flux, err, start, end = inter_arm_cut(wl, flux, err, n, start, end)

        wl_out.append(wl)
        flux_out.append(flux)
        err_out.append(err)
        flux_uncorr_out.append(flux_uncorr)

    wl_out = np.hstack(wl_out)
    flux_out = np.hstack(flux_out)
    err_out = np.hstack(err_out)
    flux_uncorr_out = np.hstack(flux_uncorr_out)

    bp_map = []
    for j, (k, l) in enumerate(zip(flux_out[:-1], err_out[:-1])):
        if k > 1.1 * flux_out[j - 1] or k < 0:
            bp_map.append(1)
        elif k < 0.90 * flux_out[j - 1]:
            bp_map.append(1)
        else:
            bp_map.append(0)
    bp_map.append(1)

    import json
    import urllib2

    query_terms = dict()
    query_terms["ra"] = str(ob[0].header['RA'])+'d' #"185.1d"
    query_terms["dec"] = str(ob[0].header['DEC'])  #"56.78"
    query_terms["radius"] = "5.0"

    url = "http://api.sdss3.org/spectrumQuery?" + '&'.join(["{0}={1}".format(key, value) for key, value in query_terms.items()])
    print(url)
    # make call to API
    response = urllib2.urlopen(url)

    # read response, converting JSON format to Python list
    matching_ids = json.loads(response.read())
    print(json.dumps(matching_ids, indent=4))

    # get the first id
    spec_id = matching_ids[0]

    url = "http://api.sdss3.org/spectrum?id={0}&format=json".format(spec_id)

    response = urllib2.urlopen(url)
    result = json.loads(response.read())
    SDSS_spectrum = result[spec_id]

    wl_sdss = np.array(SDSS_spectrum["wavelengths"])
    flux_sdss =  np.array(SDSS_spectrum["flux"])
    z_sdss = (np.array(SDSS_spectrum["z"]))
    z_sdss_err = ((np.array(SDSS_spectrum["z_err"])))

    #Insert zeros
    wl_sdss = np.concatenate([wl_sdss,np.zeros(len(wl_out) - len(wl_sdss))])
    flux_sdss = np.concatenate([flux_sdss,np.zeros(len(flux_out) - len(flux_sdss))])

    # Load linelist
    fit_line_positions = np.genfromtxt('data/fitlinelist.txt', dtype=None)
    linelist = []
    for n in fit_line_positions:
        linelist.append(n[1])
    linelist = np.array(linelist)

    from methods import wavelength_conversion
    linelist = wavelength_conversion(linelist, conversion='vacuum_to_air')

    #Cut out fitting region
    mask = np.logical_and(wl_out > 11350, (wl_out < 11750))
    wl_fit = wl_out[mask]
    flux_fit = flux_out[mask]
    fluxerr_fit = err_out[mask]

    fluxerr_new = []
    for j, (k, l) in enumerate(zip(flux_fit,fluxerr_fit)):
        if k > 1.5 * flux_fit[j-2] and k > 0:
            fluxerr_new.append(l*50)
        elif k < 0.75 * flux_fit[j-2] and k > 0:
            fluxerr_new.append(l*50)
        else:
            fluxerr_new.append(l)
    from gen_methods import smooth
    fluxerr_fit = smooth(np.array(fluxerr_new), window_len=15, window='hanning')

    #Fit continuum and subtract
    from methods import continuum_fit
    from numpy.polynomial import chebyshev
    cont, chebfit = continuum_fit(wl_fit, flux_fit, fluxerr_fit, edge_mask_len=20)
    chebfitval = chebyshev.chebval(wl, chebfit)


    #Define models to use
    from methods import voigt,gauss
    def model1(t,  amp2, sig22g, sig22l, z):
            tmp = voigt(t, abs(amp2), (1+z)*linelist[2], sig22g, sig22l)
            return tmp

    def model2(t, amp2, sig22g, z):
            tmp = gauss(t, abs(amp2), (1+z)*linelist[2], sig22g)
            return tmp

    #Initial parameters
    init_vals = [6e-12,100, z_sdss]
    y_fit_guess = model2(wl_fit, *init_vals) + cont

    #Fit
    import scipy.optimize as op
    np.random.seed(12345)
    y_op = []
    vals = []
    for i in np.arange(10000):
        print('Iteration: ', i)
        resampled_spec = np.random.normal(flux_fit, abs(fluxerr_fit))

        cont, chebfit = continuum_fit(wl_fit, resampled_spec, fluxerr_fit, edge_mask_len=20)
        chebfitval = chebyshev.chebval(wl, chebfit)

        best_vals, covar = op.curve_fit(model2, wl_fit, resampled_spec - cont, sigma=fluxerr_fit, absolute_sigma=True, p0=init_vals)
        vals.append(best_vals)


    up = (np.percentile(vals, 84, axis = 0)[2] - np.mean(vals, axis = 0)[2])
    down = (np.percentile(vals, 16, axis = 0)[2] - np.mean(vals, axis = 0)[2])



    v_bary = ob[0].header['HIERARCH ESO QC VRAD BARYCOR']
    c_km = (2.99792458e8/1000.0)

    print("""Curve_fit results:
        Redshift = {0} + {1} - {2} (SDSS: {3} +- {4})
    """.format(np.mean(vals, axis = 0)[2] + v_bary /c_km, up, down, z_sdss, z_sdss_err))

    z_op = np.mean(vals, axis = 0)[2] + v_bary /c_km



    #Correct for Lyman alpha forest absorption. Only objects with z > 3000/1216 - 1 ~ 1.5
    #have the forest inside the observed wavelength range.
    mask = (wl_out < (1 + z_op)*1216)
    wave = wl_out[mask]
    flux = flux_out[mask]
    import continuum_mark.interactive
    normalise = continuum_mark.interactive.continuum_mark(wl_out[mask], flux_out[mask], err_out[mask])
    normalise.endpoint = 'n' #str(raw_input('Insert endpoint before interpolation(y/n)? '))

    normalise.run()
    pl.show()
    cont_out = np.concatenate([normalise.continuum,flux_out[~mask]])



    #Flag whether to use estimated redshift
    flag = 1


    from astroquery.irsa_dust import IrsaDust
    import astropy.coordinates as coord
    import astropy.units as u
    C = coord.SkyCoord(ob[0].header['RA']*u.deg, ob[0].header['DEC']*u.deg, frame='fk5')
    dust_image = IrsaDust.get_images(C, radius=2 *u.deg, image_type='ebv')[0]
    ebv = np.mean(dust_image[0].data[40:42,40:42])


    # Saving telluric uncorrected data to .dat file
    dt = [("wl", np.float64), ("flux", np.float64), ("error", np.float64), ("bp map", np.float64),
          ("wl_sdss", np.float64), ("flux_sdss", np.float64) , ("flux_cont", np.float64) ]
    data = np.array(list(zip(wl_out, flux_uncorr_out, err_out, bp_map, wl_sdss, flux_sdss, cont_out)), dtype=dt)
    file_name = "Telluric_uncorrected_science"
    np.savetxt(root_dir+"/"+file_name+".dat", data, header="wl flux fluxerror bp_map wl_sdss flux_sdss cont")#, fmt = ['%5.1f', '%2.15E'] )



    #Saving telluric corrected data to .dat file
    dt = [("wl", np.float64), ("flux", np.float64), ("error", np.float64), ("bp map", np.float64),
          ("wl_sdss", np.float64), ("flux_sdss", np.float64) , ("flux_cont", np.float64) ]
    data = np.array(list(zip(wl_out, flux_out, err_out, bp_map, wl_sdss, flux_sdss, cont_out)), dtype=dt)
    file_name = "Telluric_corrected_science"
    np.savetxt(root_dir+"/"+file_name+".dat", data, header="wl flux fluxerror bp_map wl_sdss flux_sdss cont")#, fmt = ['%5.1f', '%2.15E'] )


    #Saving info to .dat file
    dt = [("z_op", np.float64), ("z_sdss", np.float64), ("flag", np.float64), ("ebv", np.float64)]
    data = np.array(list(zip([z_op], [z_sdss], [flag], [ebv])), dtype=dt)
    file_name = "Object_info"
    np.savetxt(root_dir+"/"+file_name+".dat", data, header="z_op z_sdss flag ebv ") #, fmt = ['%5.1f', '%2.15E'] )
Example #29
	def GetAndUploadAllData(self,objs,ras,decs,doNED=True):
		TransientUploadDict = {}

		assert len(ras) == len(decs)

		if type(ras[0]) == float:
			scall = SkyCoord(ras,decs,frame="fk5",unit=u.deg)
		else:
			scall = SkyCoord(ras,decs,frame="fk5",unit=(u.hourangle,u.deg))

		ebvall,nedtables = [],[]
		ebvtstart = time.time()
		if doNED:
			for sc in scall:
				dust_table_l = IrsaDust.get_query_table(sc)
				ebvall += [dust_table_l['ext SandF mean'][0]]
				try:
					ned_region_table = Ned.query_region(sc, radius=self.nedradius*u.arcmin, equinox='J2000.0')
				except Exception:
					ned_region_table = None
				nedtables += [ned_region_table]
			print('E(B-V)/NED time: %.1f seconds'%(time.time()-ebvtstart))

		tstart = time.time()
		TNSData = []
		json_data = []
		for j in range(len(objs)):
			TNSGetSingle = [("objname",objs[j]),
							("photometry","1"),
							("spectra","1")]

			response=get(self.tnsapi, TNSGetSingle, self.tnsapikey)
			json_data += [format_to_json(response.text)]
		print(time.time()-tstart)
		
		print('getting TNS content takes %.1f seconds'%(time.time()-tstart))

		for j,jd in zip(range(len(objs)),json_data):
			tallstart = time.time()

			obj = objs[j]

			iobj = np.where(obj == np.array(objs))[0]
			if len(iobj) > 1: iobj = int(iobj[0])
			else: iobj = int(iobj)
			
			if doNED: sc,ebv,nedtable = scall[iobj],ebvall[iobj],nedtables[iobj]
			else: sc = scall[iobj]; ebv = None; nedtable = None
			
			print("Object: %s\nRA: %s\nDEC: %s" % (obj,ras[iobj],decs[iobj]))
			
			########################################################
			# For Item in Email, Get NED
			########################################################
			if type(jd['data']['reply']['name']) == str:
				jd = jd['data']['reply']
			else:
				jd = None

			transientdict = self.getTNSData(jd,obj,sc,ebv)
			try:
				photdict = self.getZTFPhotometry(sc)
			except Exception: photdict = None
			try:
				if jd:
					photdict,nondetectdate,nondetectmaglim,nondetectfilt,nondetectins = \
						self.getTNSPhotometry(jd,PhotUploadAll=photdict)
					specdict = self.getTNSSpectra(jd,sc)
					transientdict['transientphotometry'] = photdict
					transientdict['transientspectra'] = specdict

					if nondetectdate: transientdict['non_detect_date'] = nondetectdate
					if nondetectmaglim: transientdict['non_detect_limit'] = nondetectmaglim
					if nondetectfilt: transientdict['non_detect_band'] =  nondetectfilt
					if nondetectfilt: transientdict['non_detect_instrument'] =	nondetectins
			except Exception: pass

			try:
				if doNED:
					hostdict,hostcoords = self.getNEDData(jd,sc,nedtable)
					transientdict['host'] = hostdict
					transientdict['candidate_hosts'] = hostcoords
			except Exception: pass
	
			TransientUploadDict[obj] = transientdict

		TransientUploadDict['noupdatestatus'] = self.noupdatestatus
		self.UploadTransients(TransientUploadDict)
		
		return(len(TransientUploadDict))
Example #30
def sed(kic, ra, dec, ax):
    """
    Retrieves star's magnitudes in different bands from ExoFOP and fits SED from Castelli & Kurucz library.
    Note: you need an account to access Kepler ExoFOP data. I do not have one, so I'm using the format for K2 targets
    instead.

    :param kic: K2 EPIC ID of target.
    :param ra: RA of target.
    :param dec: Dec of target.
    :param ax: plot handle of SED plot.

    :return: ax: SED plot.
    """

    urllib.urlretrieve(
        'http://vizier.u-strasbg.fr/viz-bin/sed?-c=' + ra + "%2C" + dec +
        '&-c.rs=1', kic + '_sed.vot')  # urllib.request.urlretrieve on Python 3
    print('http://vizier.u-strasbg.fr/viz-bin/sed?-c=' + ra + "%2C" + dec + '&-c.rs=0.005')

    tb = votable.parse_single_table(kic + '_sed.vot')
    data = tb.array
    wav_all = 3e5 * 1e4 / data['_sed_freq'].data  # angstrom
    f_all = data['_sed_flux'].data
    unc_all = data['_sed_eflux'].data
    filters = data['_sed_filter'].data

    filter_dict = {'2MASS:Ks': '2MASS Ks', '2MASS:J': '2MASS J', '2MASS:H': '2MASS H', 'WISE:W1': 'WISE-1',
                   'WISE:W2': 'WISE-2', 'SDSS:u': 'SDSS u', 'SDSS:g': 'SDSS g', \
                   'SDSS:r': 'SDSS r', 'SDSS:i': 'SDSS i', 'SDSS:z': 'SDSS z'}

    c = coord.SkyCoord(float(ra) * u.deg, float(dec) * u.deg, frame='icrs')
    tb = IrsaDust.get_extinction_table(c)
    filters2 = tb['Filter_name']
    allA = tb['A_SandF']
    A = []
    f_ob_orig = []
    wav_ob = []
    unc = []

    for f in filters:
        if f in filter_dict.keys():
            filtmatch = filter_dict[f]
            ind = np.where(filters2 == filtmatch)[0]
            A.append(np.mean(allA[ind]))
            ind = np.where(filters == f)[0]
            f_ob_orig.append(np.mean(f_all[ind]))
            wav_ob.append(np.mean(wav_all[ind]))
            unc.append(np.mean(unc_all[ind]))

    f_ob_orig = np.array(f_ob_orig)
    A = np.array(A)
    wav_ob = np.array(wav_ob)
    f_ob = (f_ob_orig * 10**(A / 2.5))

    metallicity = ['ckp00']
    m = [0.0]
    t = np.arange(3500, 13000, 250)
    t2 = np.arange(14000, 50000, 1000)
    t = np.concatenate((t, t2))

    log_g = ['g20', 'g25', 'g30', 'g35', 'g40', 'g45', 'g50']
    g = np.arange(2., 5., 0.5)

    best_m = 0
    best_t = 0
    best_g = 0
    best_off = 0.0
    chi2_rec = 1e6

    # Do grid search to find best-fit SED
    # This loop can probably be parallelized to save time
    for im, mval in enumerate(m):
        for it, tval in enumerate(t):
            for ig, gval in enumerate(g):
                # load model
                hdulist = pyfits.open('fits/' + metallicity[im] + '/' +
                                      metallicity[im] + '_' + str(tval) +
                                      '.fits')
                data = hdulist[1].data
                wmod = data['WAVELENGTH']
                fmod = data[log_g[ig]] * 3.34e4 * wmod**2

                # fit observations
                f_int = np.exp(
                    np.interp(np.log(wav_ob), np.log(wmod), np.log(fmod)))
                offsets = np.linspace(np.log(min(f_ob / f_int)),
                                      np.log(max(f_ob / f_int)), 51)
                for i_off, offset in enumerate(offsets):
                    chi2 = sum((f_int * np.exp(offset) - f_ob)**2)

                    print('chi2=', chi2, mval, tval, gval)
                    if chi2 < chi2_rec:
                        chi2_rec = chi2
                        best_m = im
                        best_g = ig
                        best_t = it
                        best_off = offset

    print('best fit: m=', m[best_m], 'T=', t[best_t], 'log g=', g[best_g])

    hdulist = pyfits.open('fits/' + metallicity[best_m] + '/' +
                          metallicity[best_m] + '_' + str(t[best_t]) + '.fits')
    data = hdulist[1].data
    wmod = data['WAVELENGTH']
    fmod = data[log_g[best_g]] * 3.34e4 * wmod**2
    fmod *= np.exp(best_off)

    ax.plot(wmod / 1e4, fmod, label='Castelli & Kurucz model')
    ax.set_xscale('log')
    ax.plot(wav_ob / 1e4,
            f_ob_orig,
            lw=0,
            marker='s',
            label='Uncorrected',
            ms=10)
    ax.plot(wav_ob / 1e4,
            f_ob,
            lw=0,
            marker='o',
            label='Corrected for extinction',
            ms=10)
    ax.set_xlabel(r'${\rm Wavelength} \ (\mu m)$', fontsize=18)
    ax.set_xlim(0.1, max(wmod) / 1e4)
    ax.set_ylabel(r'$F_{\nu} \ {\rm (Jy)}$', fontsize=18)
    ax.legend()
    return ax
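
The extinction correction applied to the observed fluxes in the function above, in isolation (values illustrative):

import numpy as np

A = np.array([0.10, 0.08, 0.05])   # per-band extinction in magnitudes (illustrative)
f_obs = np.array([1.2, 3.4, 5.6])  # observed fluxes in Jy (illustrative)
f_dered = f_obs * 10 ** (A / 2.5)  # same operation as f_ob = f_ob_orig * 10**(A/2.5)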
Example #31
	def ProcessTNSEmails(self,post=True,posturl=None,db=None):
		body = ""
		html = ""
		tns_objs = []
		radius = 5 # arcminutes


		
		########################################################
		# Get All Email
		########################################################
		mail =	imaplib.IMAP4_SSL('imap.gmail.com', 993) #, ssl_context=ctx
		
		## NOTE: This is not the way to do this. You will want to implement an industry-standard login step ##
		mail.login(self.login, self.password)
		mail.select('TNS', readonly=False)
		retcode, msg_ids_bytes = mail.search(None, '(UNSEEN)')
		msg_ids = msg_ids_bytes[0].decode("utf-8").split(" ")

		try:
			if retcode != "OK" or msg_ids[0] == "":
				raise ValueError("No messages")

		except ValueError as err:
			print("%s. Exiting..." % err.args)
			mail.close()
			mail.logout()
			del mail
			print("Process done.")
			return
			
		for i in range(len(msg_ids)):
			########################################################
			# Iterate Over Email
			########################################################
			typ, data = mail.fetch(msg_ids[i],'(RFC822)')
			msg = email.message_from_bytes(data[0][1])
			# Mark messages as "Unseen"
			# result, wdata = mail.store(msg_ids[i], '-FLAGS', '\Seen')
				
			if msg.is_multipart():
				for part in msg.walk():
					ctype = part.get_content_type()
					cdispo = str(part.get('Content-Disposition'))
					
					# skip any text/plain (txt) attachments
					if ctype == 'text/plain' and 'attachment' not in cdispo:
						body = part.get_payload(decode=True)  # decode
						break
			# not multipart - i.e. plain text, no attachments, keeping fingers crossed
			else:
				body = msg.get_payload(decode=True)

			objs = re.findall(reg_obj,body)
			print(objs)
			ras = re.findall(reg_ra,body)
			print(ras)
			decs = re.findall(reg_dec,body)
			print(decs)
			
			try:
				########################################################
				# For Item in Email, Get TNS
				########################################################

				for j in range(len(objs)):
					print("Object: %s\nRA: %s\nDEC: %s" % (objs[j].decode('utf-8'),
														   ras[j].decode('utf-8'),
														   decs[j].decode('utf-8')))
				
					# Get TNS page
					int_name=""
					evt_type=""
					z=""
					host_name=""
					host_redshift = ""
					ned_url = ""
			
					tns_url = "https://wis-tns.weizmann.ac.il/object/" + objs[j].decode("utf-8")
					print(tns_url)
					
					tstart = time.time()
					try:
						response = requests.get(tns_url,timeout=20)
						html = response.content
					except Exception:
						print('trying again')
						response = requests.get(tns_url,timeout=20)
						html = response.content

					soup = BeautifulSoup(html, "lxml")
					
					# Get Internal Name, Type, Disc. Date, Disc. Mag, Redshift, Host Name, Host Redshift, NED URL
					int_name = soup.find('td', attrs={'class':'cell-internal_name'}).text
					evt_type = soup.find('div', attrs={'class':'field-type'}).find('div').find('b').text
					disc_date = soup.find('div', attrs={'class':'field field-discoverydate'}).find('div').find('b').text
					disc_mag = soup.find('div', attrs={'class':'field field-discoverymag'}).find('div').find('b').text
					try: source_group = soup.find('div', attrs={'class':'field field-source_group_name'}).find('div').find('b').text
					except AttributeError: source_group = "Unknown"
					try: disc_filter = soup.find('td', attrs={'cell':'cell-filter_name'}).text
					except AttributeError: disc_filter = "Unknown"
					if '-' in disc_filter:
						disc_instrument = disc_filter.split('-')[1]
					else: disc_instrument = 'Unknown'

					# lets pull the photometry
					nondetectmaglim = None
					nondetectdate = None
					nondetectfilt = None
					tmag,tmagerr,tflux,tfluxerr,tfilt,tinst,tobsdate = \
						np.array([]),np.array([]),np.array([]),np.array([]),\
						np.array([]),np.array([]),np.array([])
					try:
						tables = soup.find_all('table',attrs={'class':'photometry-results-table'})
						for table in tables:
							data = []
							table_body = table.find('tbody')
							header = table.find('thead')
							headcols = header.find_all('th')
							header = np.array([ele.text.strip() for ele in headcols])
							#header.append([ele for ele in headcols if ele])
							rows = table_body.find_all('tr')

							for row in rows:
								cols = row.find_all('td')
								data.append([ele.text.strip() for ele in cols])

							for datarow in data:
								datarow = np.array(datarow)
								if photkeydict['unit'] in header:
									if 'mag' in datarow[header == photkeydict['unit']][0].lower():
										if photkeydict['magflux'] in header:
											tmag = np.append(tmag,datarow[header == photkeydict['magflux']])
											tflux = np.append(tflux,'')
										else:
											tmag = np.append(tmag,'')
											tflux = np.append(tflux,'')
										if photkeydict['magfluxerr'] in header:
											tmagerr = np.append(tmagerr,datarow[header == photkeydict['magfluxerr']])
											tfluxerr = np.append(tfluxerr,'')
										else:
											tmagerr = np.append(tmagerr,None)
											tfluxerr= np.append(tfluxerr,'')
									elif 'flux' in datarow[header == photkeydict['unit']][0].lower():
										if photkeydict['magflux'] in header:
											tflux = np.append(tflux,datarow[header == photkeydict['magflux']])
											tmag = np.append(tmag,'')
										else:
											tflux = np.append(tflux,'')
											tmag = np.append(tmag,'')
										if photkeydict['magfluxerr'] in header:
											tfluxerr = np.append(tfluxerr,datarow[header == photkeydict['magfluxerr']])
											tmagerr = np.append(tmagerr,'')
										else:
											tfluxerr = np.append(tfluxerr,None)
											tmagerr = np.append(tmagerr,'')
								if photkeydict['filter'] in header:
									tfilt = np.append(tfilt,datarow[header == photkeydict['filter']])
								if photkeydict['inst'] in header:
									tinst = np.append(tinst,datarow[header == photkeydict['inst']])
								if photkeydict['obsdate'] in header:
									tobsdate = np.append(tobsdate,datarow[header == photkeydict['obsdate']])
								if photkeydict['remarks'] in header and photkeydict['maglim'] in header:
									if 'last' in datarow[header == photkeydict['remarks']][0].lower() and \
									   'non' in datarow[header == photkeydict['remarks']][0].lower() and \
									   'detection' in datarow[header == photkeydict['remarks']][0].lower():
										nondetectmaglim = datarow[header == photkeydict['maglim']][0]
										nondetectdate = datarow[header == photkeydict['obsdate']][0]
										nondetectfilt = datarow[header == photkeydict['filter']][0]

						# set the discovery flag
						disc_flag = np.zeros(len(tmag))
						iMagsExist = np.where(tmag != '')[0]
						if len(iMagsExist) == 1: disc_flag[np.where(tmag != '')] = 1
						elif len(iMagsExist) > 1:
							mjd = np.zeros(len(iMagsExist))
							for d in range(len(mjd)):
								mjd[d] = date_to_mjd(tobsdate[d])
							iMinMJD = np.where(mjd == np.min(mjd))[0]
							if len(iMinMJD) > 1: iMinMJD = [iMinMJD[0]]
							for im,iim in zip(iMagsExist,range(len(iMagsExist))):
								if len(iMinMJD) and iim == iMinMJD[0]:
									disc_flag[im] = 1

					except Exception:
						print('Error : couldn\'t get photometry!!!')
									
					z = soup.find('div', attrs={'class':'field-redshift'}).find('div').find('b').text

					hn_div = soup.find('div', attrs={'class':'field-hostname'})
					if hn_div is not None:
						host_name = hn_div.find('div').find('b').text

					z_div = soup.find('div', attrs={'class':'field-host_redshift'})
					if z_div is not None:
						host_redshift = z_div.find('div').find('b').text

					ned_url = soup.find('div', attrs={'class':'additional-links clearfix'}).find('a')['href']
					
					# Get photometry records
					table = soup.findAll('table', attrs={'class':'photometry-results-table'})
					prs = []
					for k in range(len(table)):

						table_body = table[k].find('tbody')
						rows = table_body.find_all('tr')
						print(type(rows))
						
						for l in range(len(rows)):
							prs.append(phot_row(rows[l]))
					
					########################################################
					# For Item in Email, Get NED
					########################################################
					ra_j = ras[j].decode("utf-8")
					dec_j = decs[j].decode("utf-8")
			
					co = coordinates.SkyCoord(ra=ra_j, dec=dec_j, unit=(u.hour, u.deg), frame='fk4', equinox='J2000.0')
					dust_table_l = IrsaDust.get_query_table(co)
					ebv = dust_table_l['ext SandF mean'][0]
					ned_region_table = None
				
					gal_candidates = 0
					radius = 5
					while (radius < 11 and gal_candidates < 21): 
						try:
							print("Radius: %s" % radius)
							ned_region_table = Ned.query_region(co, radius=radius*u.arcmin, equinox='J2000.0')
							gal_candidates = len(ned_region_table)
							radius += 1
							print("Result length: %s" % gal_candidates)
						except Exception as e:
							radius += 1
							print("NED exception: %s" % e.args)

					galaxy_names = []
					galaxy_zs = []
					galaxy_seps = []
					galaxies_with_z = []
					galaxy_ras = []
					galaxy_decs = []
					galaxy_mags = []
					if ned_region_table is not None:
						print("NED Matches: %s" % len(ned_region_table))

						galaxy_candidates = np.asarray([entry.decode("utf-8") for entry in ned_region_table["Type"]])
						galaxies_indices = np.where(galaxy_candidates == 'G')
						galaxies = ned_region_table[galaxies_indices]
						
						print("Galaxy Candidates: %s" % len(galaxies))

						# Get Galaxy name, z, separation for each galaxy with z
						for l in range(len(galaxies)):
							if isinstance(galaxies[l]["Redshift"], float):
								galaxies_with_z.append(galaxies[l])
								galaxy_names.append(galaxies[l]["Object Name"])
								galaxy_zs.append(galaxies[l]["Redshift"])
								galaxy_seps.append(galaxies[l]["Distance (arcmin)"])
								galaxy_ras.append(galaxies[l]["RA(deg)"])
								galaxy_decs.append(galaxies[l]["DEC(deg)"])
								galaxy_mags.append(galaxies[l]["Magnitude and Filter"])
								
						print("Galaxies with z: %s" % len(galaxies_with_z))
						# Get Dust in LoS for each galaxy with z
						if len(galaxies_with_z) > 0:
							for l in range(len(galaxies_with_z)):
								co_l = coordinates.SkyCoord(ra=galaxies_with_z[l]["RA(deg)"], 
															dec=galaxies_with_z[l]["DEC(deg)"], 
															unit=(u.deg, u.deg), frame='fk4', equinox='J2000.0')

						else:
							print("No NED Galaxy hosts with z")

					tns_objs.append(tns_obj(name = objs[j].decode("utf-8"),
											tns_url = tns_url,
											internal_name = int_name,
											event_type = evt_type,
											ra = ras[j].decode("utf-8"),
											dec = decs[j].decode("utf-8"),
											ebv = ebv,
											z = z,
											tns_host = host_name, 
											tns_host_z = host_redshift,
											ned_nearest_host = galaxy_names, 
											ned_nearest_z = galaxy_zs,
											ned_nearest_sep = galaxy_seps,
											discovery_date = disc_date,
											phot_rows = prs, 
											disc_mag = disc_mag
											))

					if post:
						snid = objs[j].decode("utf-8")
						# if source_group doesn't exist, we need to add it
						groupid = db.get_ID_from_DB('observationgroups',source_group)
						if not groupid:
							groupid = db.get_ID_from_DB('observationgroups','Unknown')#db.post_object_to_DB('observationgroup',{'name':source_group})

						# get the status
						statusid = db.get_ID_from_DB('transientstatuses','New')
						if not statusid: raise RuntimeError('Error : not all statuses are defined')
						
						# put in the hosts
						hostcoords = ''; hosturl = ''; ned_mag = ''
						for z,name,ra,dec,sep,mag in zip(galaxy_zs,galaxy_names,galaxy_ras,galaxy_decs,galaxy_seps,galaxy_mags):
							if sep == np.min(galaxy_seps):
								hostdict = {'name':name,'ra':ra,'dec':dec,'redshift':z}
								hostoutput = db.post_object_to_DB('host',hostdict,return_full=True)
								hosturl = hostoutput['url']
								ned_mag = mag
								
							hostcoords += 'ra=%.7f, dec=%.7f\n'%(ra,dec)

						# put in the spec type
						eventid = db.get_ID_from_DB('transientclasses',evt_type)
						if not eventid:
							eventid = db.get_ID_from_DB('transientclasses','Unknown')#db.post_object_to_DB('transientclasses',{'name':evt_type})
							
						# first check if already exists
						dbid = db.get_ID_from_DB('transients',snid)
						k2id = db.get_ID_from_DB('internalsurveys','K2')
						# then POST or PUT, depending
						# put in main transient
						sc = SkyCoord(ras[j].decode("utf-8"),decs[j].decode("utf-8"),FK5,unit=(u.hourangle,u.deg))
						db.options.best_spec_classapi = db.options.transientclassesapi

						newobjdict = {'name':objs[j].decode("utf-8"),
							      'ra':sc.ra.deg,
							      'dec':sc.dec.deg,
							      #'status':statusid,
							      'obs_group':groupid,
							      'host':hosturl,
							      'candidate_hosts':hostcoords,
							      'best_spec_class':eventid,
							      'TNS_spec_class':evt_type,
							      'mw_ebv':ebv,
							      'disc_date':disc_date.replace(' ','T'),
							      'tags':[]}
						if nondetectdate: newobjdict['non_detect_date'] = nondetectdate.replace(' ','T')
						if nondetectmaglim: newobjdict['non_detect_limit'] = nondetectmaglim
						if nondetectfilt:
							nondetectid = db.get_ID_from_DB('photometricbands',nondetectfilt)
							if nondetectid:
								newobjdict['non_detect_filter'] =  nondetectid

						if dbid:
							# if the status is ignore, we're going to promote this to new
							status_getid = db.get_key_from_object(dbid,'status')
							statusname = db.get_key_from_object(status_getid,'name')
							if statusname == 'Ignore':
								newobjdict['status'] = statusid
							transientid = db.patch_object_to_DB('transient',newobjdict,dbid)
						else:
							newobjdict['status'] = statusid
							transientid = db.post_object_to_DB('transient',newobjdict)

						# only add in host info and photometry if galaxy wasn't already in the database
						# (avoids duplicates)
						if not dbid:
							# the photometry table probably won't exist, so add this in
							# phot table needs an instrument, which needs a telescope, which needs an observatory
							for ins in np.unique(tinst):
								instrumentid = db.get_ID_from_DB('instruments',ins)
								if not instrumentid:
									instrumentid = db.get_ID_from_DB('instruments','Unknown')
								if not instrumentid:
									observatoryid = db.post_object_to_DB(
										'observatory',{'name':'Unknown','tz_name':0,'utc_offset':0})
									teldict= {'name':'Unknown',
										  'observatory':observatoryid,
										  'longitude':0,
										  'latitude':0,
										  'elevation':0}
									telid = db.post_object_to_DB('telescope',teldict)
									instrumentid = db.post_object_to_DB(
										'instrument',{'name':'Unknown','telescope':telid})

								phottabledict = {'transient':transientid,
										 'obs_group':groupid,
										 'instrument':instrumentid}
								phottableid = db.post_object_to_DB('photometry',phottabledict)

								for f in np.unique(tfilt):
									bandid = db.get_ID_from_DB('photometricbands',f)
									if not bandid:
										bandid = db.post_object_to_DB('band',{'name':f,'instrument':instrumentid})
						
									# put in the photometry
									# (flux loop variables renamed so they don't shadow the filter name f)
									for m,me,flx,flxe,od,df in zip(tmag[(f == tfilt) & (ins == tinst)],
															   tmagerr[(f == tfilt) & (ins == tinst)],
															   tflux[(f == tfilt) & (ins == tinst)],
															   tfluxerr[(f == tfilt) & (ins == tinst)],
															   tobsdate[(f == tfilt) & (ins == tinst)],
															   disc_flag[(f == tfilt) & (ins == tinst)]):
										if not m and not me and not flx and not flxe: continue
										# TODO: compare od to disc_date.replace(' ','T')
										# if they're close or equal?  Set discovery flag
										photdatadict = {'obs_date':od.replace(' ','T'),
														'band':bandid,
														'photometry':phottableid}
										if m: photdatadict['mag'] = m
										if me: photdatadict['mag_err'] = me
										if flx: photdatadict['flux'] = flx
										if flxe: photdatadict['flux_err'] = flxe
										if df: photdatadict['discovery_point'] = 1
										photdataid = db.post_object_to_DB('photdata',photdatadict)

							# put in the galaxy photometry
							if ned_mag:
								try:
									unknowninstid = db.get_ID_from_DB('instruments','Unknown')
									unknowngroupid = db.get_ID_from_DB('observationgroups','NED')
									if not unknowngroupid:
										unknowngroupid = db.get_ID_from_DB('observationgroups','Unknown')
									unknownbandid = db.get_ID_from_DB('photometricbands','Unknown')
								
									hostphottabledict = {'host':hosturl,
														 'obs_group':unknowngroupid,
														 'instrument':unknowninstid}
									hostphottableid = db.post_object_to_DB('hostphotometry',hostphottabledict)
						
									# put in the photometry
									hostphotdatadict = {'obs_date':disc_date.replace(' ','T'),#'2000-01-01 00:00:00',
														'mag':ned_mag.decode('utf-8')[:-1],
														'band':unknownbandid,
														'photometry':hostphottableid}
									hostphotdataid = db.post_object_to_DB('hostphotdata',hostphotdatadict)
								except:
									print('getting host mag failed')
				# Mark messages as "Seen"
				result, wdata = mail.store(msg_ids[i], '+FLAGS', '\\Seen')

			except: # ValueError as err:
				for j in range(len(objs)):
					print('Something went wrong!!!	Sticking to basic info only')
					print("Object: %s\nRA: %s\nDEC: %s" % (objs[j].decode('utf-8'),
														   ras[j].decode('utf-8'),
														   decs[j].decode('utf-8')))
					
					snid = objs[j].decode("utf-8")
					# if source_group doesn't exist, we need to add it
					source_group = "Unknown"
					groupid = db.get_ID_from_DB('observationgroups',source_group)
					if not groupid:
						groupid = db.get_ID_from_DB('observationgroups','Unknown')#db.post_object_to_DB('observationgroup',{'name':source_group})

					# get the status
					statusid = db.get_ID_from_DB('transientstatuses','New')
					if not statusid: raise RuntimeError('Error : not all statuses are defined')
					
					dbid = db.get_ID_from_DB('transients',snid)
					k2id = db.get_ID_from_DB('internalsurveys','K2')
					# then POST or PUT, depending
					# put in main transient
					sc = SkyCoord(ras[j].decode("utf-8"),decs[j].decode("utf-8"),FK5,unit=(u.hourangle,u.deg))
					db.options.best_spec_classapi = db.options.transientclassesapi
					newobjdict = {'name':objs[j].decode("utf-8"),
						      'ra':sc.ra.deg,
						      'dec':sc.dec.deg,
						      'status':statusid,
						      'obs_group':groupid,
						      'tags':[]}

					if dbid:
						transientid = db.put_object_to_DB('transient',newobjdict,dbid)
					else:
						transientid = db.post_object_to_DB('transient',newobjdict)

		#WriteOutput(tns_objs)
		print("Process done.")
Example #32
0
def add_extinction_old(catalogue,
                       getOptical=True,
                       getIR=True,
                       url_ned='http://ned.ipac.caltech.edu/cgi-bin/calc'):

    nsrcs = len(catalogue)
    A = np.full((nsrcs, 10), np.nan)
    coords = SkyCoord(ra=catalogue['posRA'], dec=catalogue['posDec'])

    start = 0
    for i in trange(nsrcs):
        # Get optical extinction from NED
        if getOptical:
            payload = {
                'in_csys': 'Equatorial',
                'in_equinox': 'J2000.0',
                'obs_epoch': '2000.0',
                'out_csys': 'Equatorial',
                'out_equinox': 'J2000.0',
                'lon': '{:.6f}d'.format(coords[i].ra.value),
                'lat': '{:.6f}d'.format(coords[i].dec.value)
            }

            # rate-limit: wait if the previous NED query was under a second ago,
            # then always perform the request
            end = time.time()
            if end - start < 1.0:
                time.sleep(1.0)
            r = requests.get(url_ned, params=payload)
            start = time.time()

            soup = BeautifulSoup(r.text, 'html5lib')
            ned_table = soup.find('div', id='moreBANDS')
            ned_table = ned_table.find('table')

            j = 0
            for row in ned_table.findAll("tr"):
                cells = row.findAll("td")
                if len(cells):
                    if cells[0].contents[0] == 'PS1' and j < 5:
                        A[i, j] = cells[3].contents[0]
                        j += 1

        if getIR:
            t = IrsaDust.get_extinction_table(coords[i])

            # NIR extinction
            if catalogue['NIRobjID'][i] > 0:
                if catalogue['NIR_SURVEY'][i] == '2MASS':
                    A[i, 5:8] = t['A_SandF'][16:19]

                elif (catalogue['NIR_SURVEY'][i] == 'UKIDSS'
                      or catalogue['NIR_SURVEY'][i] == 'VISTA'):
                    A[i, 5:8] = t['A_SandF'][13:16]

            # WISE extinction
            if catalogue['WSID'][i] > 0:
                A[i, 8:] = t['A_SandF'][23:25]

    A = Table(
        A,
        names=['Ag', 'Ar', 'Ai', 'Az', 'Ay', 'AJ', 'AH', 'AK', 'AW1', 'AW2'])

    catalogue = hstack([catalogue, A], join_type='exact')

    return catalogue
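
The fixed row slices above (e.g. t['A_SandF'][16:19] for 2MASS) assume a stable row ordering in the IRSA extinction table. A sketch of the same lookup selecting bands by name instead, which is more robust if the table layout changes (the coordinate is an arbitrary example):

import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astroquery.irsa_dust import IrsaDust

c = SkyCoord(132.884782 * u.deg, 17.319834 * u.deg, frame='icrs')
t = IrsaDust.get_extinction_table(c)
# pick out the 2MASS rows by filter name rather than by row index
is_2mass = np.array([str(fn).startswith('2MASS') for fn in t['Filter_name']])
print(t[is_2mass]['Filter_name', 'A_SandF'])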
Example #33
0
def sed(epic='211800191',ra='132.884782',dec='17.319834'):
	os.chdir('outputs/')

	urllib.request.urlretrieve('http://vizier.u-strasbg.fr/viz-bin/sed?-c='+ra+"%2C"+dec+'&-c.rs=1', epic+'_sed.vot')
	print('http://vizier.u-strasbg.fr/viz-bin/sed?-c='+ra+"%2C"+dec+'&-c.rs=0.005')

	tb = votable.parse_single_table(epic+'_sed.vot')
	data = tb.array
	wav_all = 3e5 * 1e4 / data['_sed_freq'].data #angstrom
	f_all = data['_sed_flux'].data
	unc_all = data['_sed_eflux'].data
	filters = data['_sed_filter'].data

	filter_dict = {'2MASS:Ks':'2MASS Ks','2MASS:J':'2MASS J','2MASS:H':'2MASS H','WISE:W1':'WISE-1','WISE:W2':'WISE-2','SDSS:u':'SDSS u','SDSS:g':'SDSS g',\
	'SDSS:r':'SDSS r','SDSS:i':'SDSS i','SDSS:z':'SDSS z'}

	c = coord.SkyCoord(float(ra)*u.deg,float(dec)*u.deg,frame='icrs')
	tb = IrsaDust.get_extinction_table(c)
	filters2 = tb['Filter_name']
	allA = tb['A_SandF']
	A = []
	f_ob_orig = []
	wav_ob = []
	unc = []

	for f in filters:
		if f in filter_dict.keys():
			filtmatch = filter_dict[f]
			ind = where(filters2==filtmatch)[0]
			A.append(mean(allA[ind]))
			ind = where(filters==f)[0]
			f_ob_orig.append(mean(f_all[ind]))
			wav_ob.append(mean(wav_all[ind]))
			unc.append(mean(unc_all[ind]))

	f_ob_orig = array(f_ob_orig)
	A = array(A)
	wav_ob = array(wav_ob)
	unc = array(unc)
	f_ob = (f_ob_orig*10**(A/2.5))

	metallicity = ['ckp00']
	m = [0.0]
	t = arange(3500,13000,250)
	t2 = arange(14000,50000,1000)
	t = concatenate((t,t2))  # 1-D arrays join along axis 0

	log_g = ['g20','g25','g30','g35','g40','g45','g50']
	g = arange(2.,5.,0.5)

	best_m =0
	best_t =0
	best_g =0
	best_off =0.0
	chi2_rec = 1e6
	os.chdir('..')

	for im, mval in enumerate(m):
		for it, tval in enumerate(t):
			for ig, gval in enumerate(g):
				#load model
				hdulist = pyfits.open('fits/'+metallicity[im]+'/'+metallicity[im]+'_'+str(tval)+'.fits')
				data = hdulist[1].data
				wmod = data['WAVELENGTH']
				fmod = data[log_g[ig]]*3.34e4*wmod**2

				#fit observations
				f_int = exp( interp(log(wav_ob), log(wmod), log(fmod)) )
				offsets = linspace(log(min(f_ob/f_int)), log(max(f_ob/f_int)), 51)
				for i_off, offset in enumerate(offsets):
					chi2 = sum((f_int*exp(offset)-f_ob)**2)

					print('chi2=', chi2, mval, tval, gval)
					if chi2 < chi2_rec:
						chi2_rec = chi2
						best_m = im
						best_g = ig
						best_t = it
						best_off = offset

	print('best fit: m=', m[best_m], 'T=', t[best_t], 'log g=', g[best_g])

	hdulist = pyfits.open('fits/'+metallicity[best_m]+'/'+metallicity[best_m]+'_'+str(t[best_t])+'.fits')
	data = hdulist[1].data
	wmod = data['WAVELENGTH']
	fmod = data[log_g[best_g]]*3.34e4*wmod**2
	fmod *= exp(best_off)

	plt.close('all')
	fig = plt.figure(figsize=(8,5))
	plt.plot(wmod/1e4, fmod,label='Castelli & Kurucz model')
	plt.xscale('log')
	plt.plot(wav_ob/1e4, f_ob_orig, lw=0, marker='s', label='Uncorrected',ms=10)
	plt.plot(wav_ob/1e4, f_ob, lw=0, marker='o', label='Corrected for extinction',ms=10)
	plt.xlabel(r'${\rm Wavelength} \ (\mu m)$',fontsize=18)
	plt.xlim(0.1,max(wmod)/1e4)
	plt.ylabel(r'$F_{\nu} \ {\rm (Jy)}$',fontsize=18)
	plt.legend()
	plt.savefig('outputs/'+epic+'_sed.pdf',dpi=150)
	return t[best_t]
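
The extinction correction above is f_ob = f_ob_orig * 10**(A/2.5), i.e. it removes A magnitudes of dimming from each observed flux. The same step as a standalone helper (input values are made-up illustration):

import numpy as np

def deredden_flux(f_obs, a_lambda):
    """Remove a_lambda magnitudes of extinction from observed fluxes."""
    return np.asarray(f_obs) * 10 ** (np.asarray(a_lambda) / 2.5)

print(deredden_flux([1.0, 2.0], [0.1, 0.05]))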
Example #34
0
    filts.append(eff)
    ax1.fill(wlr,eff,label=f.split('.')[0],alpha=.5,edgecolor="none")
    ax1.axhline(spec,color="black",lw=3,alpha=.5)
    ax1.set_ylabel("Throughput")
    ax1.axes.get_xaxis().set_visible(False)

corrections=np.empty((len(filters),len(coords)))
mags_notred=np.empty(len(filters))
mags_red=np.empty((len(filters),len(coords)))
alambdas=[ [[] for _ in coords] for _ in filts]

# the following loop queries the IrsaDust database to obtain E(B-V) according
# to S&F, scales it to A_V with R_V = 3.1, and converts that to A_lambda
# following the Fitzpatrick law
for i,c in enumerate(coords):
  C = coord.SkyCoord(c,frame="fk5")
  table=IrsaDust.get_query_table(C, radius=2.0 * u.deg)
  ebv=table["ext SandF mean"]
  al_plot=f99(wl,ebv.data[0]*3.1)
  for j,f in enumerate(filts):
      alambdas[j][i]=f99(wls[j],ebv.data[0]*3.1)
  ax2.plot(wl,al_plot,label="D"+str(i+1))
  ax2.set_xlabel(r"$\lambda$ in $\rm \AA$")
  ax2.set_ylabel("Extinction in magnitudes")
alambdas=np.array(alambdas)

# the following loop calculates the magnitudes of the flat f_nu spectra
for j,f in enumerate(filts):
    diffs=np.gradient(wls[j])
    flux=sum(wls[j]*spec*f*diffs)
    norm=sum(f*diffs/wls[j])
    for k,c in enumerate(coords):
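
The f99 helper called in this (truncated) excerpt is imported outside the snippet. A rough stand-in built on the dust_extinction package, under the assumption that f99(wavelengths, a_v) returns A_lambda in magnitudes for R_V = 3.1 (the wavelength grid is illustrative):

import numpy as np
import astropy.units as u
from dust_extinction.parameter_averages import F99

ebv = 0.05                          # E(B-V) from an IRSA query as above
wl = np.linspace(1500.0, 9000.0, 5) * u.AA
ext = F99(Rv=3.1)
a_lambda = ext(wl) * 3.1 * ebv      # ext(wl) = A(lambda)/A(V); A_V = 3.1 * E(B-V)
print(a_lambda)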
Example #35
0
def do_cleanup(catalog):
    """Task to cleanup catalog before final write."""
    task_str = catalog.get_current_task_str()

    # Set preferred names, calculate some columns based on imported data,
    # sanitize some fields
    keys = catalog.entries.copy().keys()

    cleanupcnt = 0
    for oname in pbar(keys, task_str):
        name = catalog.add_entry(oname)

        # Set the preferred name, switching to that name if name changed.
        name = catalog.entries[name].set_preferred_name()

        aliases = catalog.entries[name].get_aliases()
        catalog.entries[name].set_first_max_light()

        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['MLS', 'SSS', 'CSS', 'GRB ']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:2])):
                        discoverdate = ('/'.join([
                            '20' + alias.replace(prefix, '')[:2],
                            alias.replace(prefix, '')[2:4],
                            alias.replace(prefix, '')[4:6]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break
        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = [
                'ASASSN-', 'PS1-', 'PS1', 'PS', 'iPTF', 'PTF', 'SCP-', 'SNLS-',
                'SPIRITS', 'LSQ', 'DES', 'SNHiTS', 'Gaia', 'GND', 'GNW', 'GSD',
                'GSW', 'EGS', 'COS', 'OGLE', 'HST'
            ]
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:2]) and
                            is_number(alias.replace(prefix, '')[:1])):
                        discoverdate = '20' + alias.replace(prefix, '')[:2]
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break
        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['SNF']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:4])):
                        discoverdate = ('/'.join([
                            alias.replace(prefix, '')[:4],
                            alias.replace(prefix, '')[4:6],
                            alias.replace(prefix, '')[6:8]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break
        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['PTFS', 'SNSDF']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:2])):
                        discoverdate = ('/'.join([
                            '20' + alias.replace(prefix, '')[:2],
                            alias.replace(prefix, '')[2:4]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break
        if TIDALDISRUPTION.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['AT', 'SN', 'OGLE-', 'SM ', 'KSN-']
            for alias in aliases:
                for prefix in prefixes:
                    if alias.startswith(prefix):
                        year = re.findall(r'\d+', alias)
                        if len(year) == 1:
                            year = year[0]
                        else:
                            continue
                        if alias.replace(prefix, '').index(year) != 0:
                            continue
                        if (year and is_number(year) and '.' not in year and
                                len(year) <= 4):
                            discoverdate = year
                            if catalog.args.verbose:
                                tprint('Added discoverdate from name [' +
                                       alias + ']: ' + discoverdate)
                            source = catalog.entries[name].add_self_source()
                            catalog.entries[name].add_quantity(
                                TIDALDISRUPTION.DISCOVER_DATE,
                                discoverdate,
                                source,
                                derived=True)
                            break
                if TIDALDISRUPTION.DISCOVER_DATE in catalog.entries[name]:
                    break

        if (TIDALDISRUPTION.RA not in catalog.entries[name] or
                TIDALDISRUPTION.DEC not in catalog.entries[name]):
            prefixes = [
                'PSN J', 'MASJ', 'CSS', 'SSS', 'MASTER OT J', 'HST J', 'TCP J',
                'MACS J', '2MASS J', 'EQ J', 'CRTS J', 'SMT J'
            ]
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix) and
                            is_number(alias.replace(prefix, '')[:6])):
                        noprefix = alias.split(':')[-1].replace(
                            prefix, '').replace('.', '')
                        decsign = '+' if '+' in noprefix else '-'
                        noprefix = noprefix.replace('+', '|').replace('-', '|')
                        nops = noprefix.split('|')
                        if len(nops) < 2:
                            continue
                        rastr = nops[0]
                        decstr = nops[1]
                        ra = ':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) + \
                            ('.' + rastr[6:] if len(rastr) > 6 else '')
                        dec = (decsign + ':'.join(
                            [decstr[:2], decstr[2:4], decstr[4:6]]) +
                            ('.' + decstr[6:] if len(decstr) > 6 else ''))
                        if catalog.args.verbose:
                            tprint('Added ra/dec from name: ' + ra + ' ' + dec)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.RA, ra, source, derived=True)
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.DEC, dec, source, derived=True)
                        break
                if TIDALDISRUPTION.RA in catalog.entries[name]:
                    break

        no_host = (TIDALDISRUPTION.HOST not in catalog.entries[name] or
                   not any([
                       x[QUANTITY.VALUE] == 'Milky Way'
                       for x in catalog.entries[name][TIDALDISRUPTION.HOST]
                   ]))
        if (TIDALDISRUPTION.RA in catalog.entries[name] and
                TIDALDISRUPTION.DEC in catalog.entries[name] and no_host):
            from astroquery.irsa_dust import IrsaDust
            if name not in catalog.extinctions_dict:
                try:
                    ra_dec = (catalog.entries[name][TIDALDISRUPTION.RA][0][
                        QUANTITY.VALUE] + " " + catalog.entries[name][
                            TIDALDISRUPTION.DEC][0][QUANTITY.VALUE])
                    result = IrsaDust.get_query_table(ra_dec, section='ebv')
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception:
                    warnings.warn("Coordinate lookup for " + name +
                                  " failed in IRSA.")
                else:
                    ebv = result['ext SandF mean'][0]
                    ebverr = result['ext SandF std'][0]
                    catalog.extinctions_dict[name] = [ebv, ebverr]
            if name in catalog.extinctions_dict:
                sources = uniq_cdl([
                    catalog.entries[name].add_self_source(),
                    catalog.entries[name]
                    .add_source(bibcode='2011ApJ...737..103S')
                ])
                (catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.EBV,
                    str(catalog.extinctions_dict[name][0]),
                    sources,
                    e_value=str(catalog.extinctions_dict[name][1]),
                    derived=True))
        if ((TIDALDISRUPTION.HOST in catalog.entries[name] and
             (TIDALDISRUPTION.HOST_RA not in catalog.entries[name] or
              TIDALDISRUPTION.HOST_DEC not in catalog.entries[name]))):
            for host in catalog.entries[name][TIDALDISRUPTION.HOST]:
                alias = host[QUANTITY.VALUE]
                if ' J' in alias and is_number(alias.split(' J')[-1][:6]):
                    noprefix = alias.split(' J')[-1].split(':')[-1].replace(
                        '.', '')
                    decsign = '+' if '+' in noprefix else '-'
                    noprefix = noprefix.replace('+', '|').replace('-', '|')
                    nops = noprefix.split('|')
                    if len(nops) < 2:
                        continue
                    rastr = nops[0]
                    decstr = nops[1]
                    hostra = (':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) +
                              ('.' + rastr[6:] if len(rastr) > 6 else ''))
                    hostdec = decsign + ':'.join([
                        decstr[:2], decstr[2:4], decstr[4:6]
                    ]) + ('.' + decstr[6:] if len(decstr) > 6 else '')
                    if catalog.args.verbose:
                        tprint('Added hostra/hostdec from name: ' + hostra +
                               ' ' + hostdec)
                    source = catalog.entries[name].add_self_source()
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.HOST_RA, hostra, source, derived=True)
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.HOST_DEC,
                        hostdec,
                        source,
                        derived=True)
                    break
                if TIDALDISRUPTION.HOST_RA in catalog.entries[name]:
                    break

        if (TIDALDISRUPTION.REDSHIFT not in catalog.entries[name] and
                TIDALDISRUPTION.VELOCITY in catalog.entries[name]):
            # Find the "best" velocity to use for this
            bestsig = 0
            for hv in catalog.entries[name][TIDALDISRUPTION.VELOCITY]:
                sig = get_sig_digits(hv[QUANTITY.VALUE])
                if sig > bestsig:
                    besthv = hv[QUANTITY.VALUE]
                    bestsrc = hv['source']
                    bestsig = sig
            if bestsig > 0 and is_number(besthv):
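                # besthv is in km/s; the 1e5 factor converts to cm/s, assuming
                # CLIGHT is in CGS units, so voc is the dimensionless beta = v/c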
                voc = float(besthv) * 1.e5 / CLIGHT
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                (catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.REDSHIFT,
                    pretty_num(
                        sqrt((1. + voc) / (1. - voc)) - 1., sig=bestsig),
                    sources,
                    kind='heliocentric',
                    derived=True))
        if (TIDALDISRUPTION.REDSHIFT not in catalog.entries[name] and
                len(catalog.nedd_dict) > 0 and
                TIDALDISRUPTION.HOST in catalog.entries[name]):
            reference = "NED-D"
            refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
            for host in catalog.entries[name][TIDALDISRUPTION.HOST]:
                if host[QUANTITY.VALUE] in catalog.nedd_dict:
                    source = catalog.entries[name].add_source(
                        bibcode='2016A&A...594A..13P')
                    secondarysource = catalog.entries[name].add_source(
                        name=reference, url=refurl, secondary=True)
                    meddist = statistics.median(catalog.nedd_dict[host[
                        QUANTITY.VALUE]])
                    redz = z_at_value(cosmo.comoving_distance,
                                      float(meddist) * un.Mpc)
                    redshift = pretty_num(
                        redz, sig=get_sig_digits(str(meddist)))
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.REDSHIFT,
                        redshift,
                        uniq_cdl([source, secondarysource]),
                        kind='host',
                        derived=True)
        if (TIDALDISRUPTION.MAX_ABS_MAG not in catalog.entries[name] and
                TIDALDISRUPTION.MAX_APP_MAG in catalog.entries[name] and
                TIDALDISRUPTION.LUM_DIST in catalog.entries[name]):
            # Find the "best" distance to use for this
            bestsig = 0
            for ld in catalog.entries[name][TIDALDISRUPTION.LUM_DIST]:
                sig = get_sig_digits(ld[QUANTITY.VALUE])
                if sig > bestsig:
                    bestld = ld[QUANTITY.VALUE]
                    bestsrc = ld['source']
                    bestsig = sig
            if bestsig > 0 and is_number(bestld) and float(bestld) > 0.:
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                bestldz = z_at_value(cosmo.luminosity_distance,
                                     float(bestld) * un.Mpc)
                pnum = (float(catalog.entries[name][
                    TIDALDISRUPTION.MAX_APP_MAG][0][QUANTITY.VALUE]) - 5.0 *
                    (log10(float(bestld) * 1.0e6) - 1.0
                     ) + 2.5 * log10(1.0 + bestldz))
                pnum = pretty_num(pnum, sig=bestsig)
                catalog.entries[name].add_quantity(
                    TIDALDISRUPTION.MAX_ABS_MAG, pnum, sources, derived=True)
        if TIDALDISRUPTION.REDSHIFT in catalog.entries[name]:
            # Find the "best" redshift to use for this
            bestz, bestkind, bestsig, bestsrc = catalog.entries[
                name].get_best_redshift()
            if bestsig > 0:
                try:
                    bestz = float(bestz)
                except Exception:
                    print(catalog.entries[name])
                    raise
                if TIDALDISRUPTION.VELOCITY not in catalog.entries[name]:
                    source = catalog.entries[name].add_self_source()
                    # invert the relativistic Doppler relation:
                    # v/c = ((1+z)^2 - 1) / ((1+z)^2 + 1)
                    pnum = CLIGHT / KM * \
                        ((bestz + 1.)**2. - 1.) / ((bestz + 1.)**2. + 1.)
                    pnum = pretty_num(pnum, sig=bestsig)
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.VELOCITY,
                        pnum,
                        source,
                        kind=PREF_KINDS[bestkind],
                        derived=True)
                if bestz > 0.:
                    from astropy.cosmology import Planck15 as cosmo
                    if TIDALDISRUPTION.LUM_DIST not in catalog.entries[name]:
                        dl = cosmo.luminosity_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name]
                            .add_source(bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.LUM_DIST,
                            pretty_num(
                                dl.value, sig=bestsig),
                            sources,
                            kind=PREF_KINDS[bestkind],
                            derived=True)
                        if (TIDALDISRUPTION.MAX_ABS_MAG not in
                                catalog.entries[name] and
                                TIDALDISRUPTION.MAX_APP_MAG in
                                catalog.entries[name]):
                            source = catalog.entries[name].add_self_source()
                            pnum = pretty_num(
                                float(catalog.entries[name][
                                    TIDALDISRUPTION.MAX_APP_MAG][0][
                                        QUANTITY.VALUE]) - 5.0 *
                                (log10(dl.to('pc').value) - 1.0
                                 ) + 2.5 * log10(1.0 + bestz),
                                sig=bestsig + 1)
                            catalog.entries[name].add_quantity(
                                TIDALDISRUPTION.MAX_ABS_MAG,
                                pnum,
                                sources,
                                derived=True)
                    if TIDALDISRUPTION.COMOVING_DIST not in catalog.entries[
                            name]:
                        cd = cosmo.comoving_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name]
                            .add_source(bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            TIDALDISRUPTION.COMOVING_DIST,
                            pretty_num(
                                cd.value, sig=bestsig),
                            sources,
                            derived=True)
        if all([
                x in catalog.entries[name]
                for x in [
                    TIDALDISRUPTION.RA, TIDALDISRUPTION.DEC,
                    TIDALDISRUPTION.HOST_RA, TIDALDISRUPTION.HOST_DEC
                ]
        ]):
            # For now just using first coordinates that appear in entry
            try:
                c1 = coord(
                    ra=catalog.entries[name][TIDALDISRUPTION.RA][0][
                        QUANTITY.VALUE],
                    dec=catalog.entries[name][TIDALDISRUPTION.DEC][0][
                        QUANTITY.VALUE],
                    unit=(un.hourangle, un.deg))
                c2 = coord(
                    ra=catalog.entries[name][TIDALDISRUPTION.HOST_RA][0][
                        QUANTITY.VALUE],
                    dec=catalog.entries[name][TIDALDISRUPTION.HOST_DEC][0][
                        QUANTITY.VALUE],
                    unit=(un.hourangle, un.deg))
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                pass
            else:
                sources = uniq_cdl(
                    [catalog.entries[name].add_self_source()] + catalog.
                    entries[name][TIDALDISRUPTION.RA][0]['source'].split(',') +
                    catalog.entries[name][TIDALDISRUPTION.DEC][0]['source'].
                    split(',') + catalog.entries[name][TIDALDISRUPTION.HOST_RA]
                    [0]['source'].split(',') + catalog.entries[name][
                        TIDALDISRUPTION.HOST_DEC][0]['source'].split(','))
                if 'hostoffsetang' not in catalog.entries[name]:
                    hosa = Decimal(
                        hypot(c1.ra.degree - c2.ra.degree, c1.dec.degree -
                              c2.dec.degree))
                    hosa = pretty_num(hosa * Decimal(3600.))
                    catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.HOST_OFFSET_ANG,
                        hosa,
                        sources,
                        derived=True,
                        u_value='arcseconds')
                if (TIDALDISRUPTION.COMOVING_DIST in catalog.entries[name] and
                        TIDALDISRUPTION.REDSHIFT in catalog.entries[name] and
                        TIDALDISRUPTION.HOST_OFFSET_DIST not in
                        catalog.entries[name]):
                    offsetsig = get_sig_digits(catalog.entries[name][
                        TIDALDISRUPTION.HOST_OFFSET_ANG][0][QUANTITY.VALUE])
                    sources = uniq_cdl(
                        sources.split(',') + (catalog.entries[name][
                            TIDALDISRUPTION.COMOVING_DIST][0]['source']).
                        split(',') + (catalog.entries[name][
                            TIDALDISRUPTION.REDSHIFT][0]['source']).split(','))
                    (catalog.entries[name].add_quantity(
                        TIDALDISRUPTION.HOST_OFFSET_DIST,
                        pretty_num(
                            float(catalog.entries[name][
                                TIDALDISRUPTION.HOST_OFFSET_ANG][0][
                                QUANTITY.VALUE]) / 3600. * (pi / 180.) *
                            float(catalog.entries[name][
                                TIDALDISRUPTION.COMOVING_DIST][0][
                                    QUANTITY.VALUE]) * 1000. /
                            (1.0 + float(catalog.entries[name][
                                TIDALDISRUPTION.REDSHIFT][0][QUANTITY.VALUE])),
                            sig=offsetsig),
                        sources))

        catalog.entries[name].sanitize()
        catalog.journal_entries(bury=True, final=True, gz=True)
        cleanupcnt = cleanupcnt + 1
        if catalog.args.travis and cleanupcnt % 1000 == 0:
            break

    catalog.save_caches()

    return
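
Stripped of the catalog bookkeeping, the extinction lookup in this cleanup task reduces to the pattern below (the coordinate string is an arbitrary example):

from astroquery.irsa_dust import IrsaDust

extinctions_dict = {}
ra_dec = '12:30:49.4 +12:23:28'
try:
    result = IrsaDust.get_query_table(ra_dec, section='ebv')
except Exception:
    print('Coordinate lookup failed in IRSA.')
else:
    extinctions_dict[ra_dec] = [result['ext SandF mean'][0],
                                result['ext SandF std'][0]]
    print(extinctions_dict)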
Example #36
0
def ppxf_population_gas_sdss(file, z, name):

    # Read SDSS DR8 galaxy spectrum taken from here http://www.sdss3.org/dr8/
    # The spectrum is *already* log rebinned by the SDSS DR8
    # pipeline and log_rebin should not be used in this case.

    hdulist = pyfits.open(file)
    VAC = (10**hdulist[1].data.loglam)
    wave = []
    for i in range(0, len(VAC)):
        wave.append(VAC[i] / (1.0 + 2.735182E-4 + 131.4182 / VAC[i]**2 + 2.76249E8 / VAC[i]**4) / (1+z))
    flux = hdulist[1].data.flux*10**-17
    err = hdulist[1].data.ivar*10**-17
    #bunit = hdulist[0].header['bunit']
    #c0 = hdulist[0].header['coeff0']
    #c1 = hdulist[0].header['coeff1']
    #units = 'erg/s/cm^2/Ang'

    xarr = pyspeckit.units.SpectroscopicAxis(wave, units='angstroms')
    spec = pyspeckit.OpticalSpectrum(header=hdulist[0].header, xarr=xarr, data=flux*1e17, error=err)
    #spec.units = 'erg s^{-1} cm^{-2} \\AA^{-1}'
    #spec.xarr.units='angstroms'

    #Galactic extinction correction
    #Take the ebv of the galaxy from IrsaDust
    table = IrsaDust.get_query_table(name, section='ebv')
    ebv = table['ext SFD mean'][0]

    spec.deredden(ebv=ebv)  # deredden in place
    t = hdulist[1].data
    #z = float(hdu[1].header["Z"]) # SDSS redshift estimate

    # Create the mask
    # Only use the wavelength range in common between galaxy and stellar library.
    mask = [True]*(len(wave))
    for i in range(0, len(wave)):
        #mask[i]=(wave[i] > 3540) & (wave[i] < 7409)
        mask[i] = (wave[i] > 3750) & (wave[i] < 7400)  # use a smaller wavelength range

    #mask for the galaxy
    galaxy = t.field('flux')/np.median(t.field('flux'))  # Normalize spectrum to avoid numerical issues

    galaxymask = []
    for i in range(0, len(mask)):
        if mask[i]:
            galaxymask.append(galaxy[i])

    galaxy = np.array(galaxymask)

    #mask for the wavelength
    #create an array with only the allowed values of the wavelength
    wavemask = []
    for i in range(0, len(mask)):
        if mask[i]:
            wavemask.append(wave[i])

    wave = np.array(wavemask)

    #create a mask for the emission lines
    NeIIIa = 3869.9
    NeIIIb = 3971.1
    Heps = 3890.2
    Hdelta = 4102.9
    Hgamma = 4341.7
    OIIIc = 4364.4
    HeIIa = 4687.0
    HeIIb = 5413.0
    SIII = 6313.8
    OIa = 5578.9
    OIb = 6365.5

    Hbeta = 4861.33
    OIIIa = 4958.92
    OIIIb = 5006.84
    OI = 6300.30
    NIIa = 6549.86
    NIIb = 6585.27
    Halpha = 6564.614
    SIIa = 6718.2
    SIIb = 6732.68
    ArIII = 7137.8

    delta = 10
    delta2 = 20
    maskHa = [True]*(len(wave))
    for i in range(0, len(wave)):
        maskHa[i] = (((wave[i] < (Halpha - delta2)) or (wave[i] > (Halpha + delta2))) &
                    ((wave[i] < (Hbeta - delta2)) or (wave[i] > (Hbeta + delta2))) &
                    ((wave[i] < (OIIIa - delta)) or (wave[i] > (OIIIa + delta))) &
                    ((wave[i] < (OIIIb - delta)) or (wave[i] > (OIIIb + delta))) &
                    ((wave[i] < (OI - delta)) or (wave[i] > (OI + delta))) &
                    ((wave[i] < (NIIa - delta)) or (wave[i] > (NIIa + delta))) &
                    ((wave[i] < (NIIb - delta)) or (wave[i] > (NIIb + delta))) &
                    ((wave[i] < (SIIa - delta)) or (wave[i] > (SIIa + delta))) &
                    ((wave[i] < (SIIb - delta)) or (wave[i] > (SIIb + delta))) &
                    ((wave[i] < (NeIIIa - delta)) or (wave[i] > (NeIIIa + delta))) &
                    ((wave[i] < (NeIIIb - delta)) or (wave[i] > (NeIIIb + delta))) &
                    ((wave[i] < (Heps - delta)) or (wave[i] > (Heps + delta))) &
                    ((wave[i] < (Hdelta - delta)) or (wave[i] > (Hdelta + delta))) &
                    ((wave[i] < (Hgamma - delta)) or (wave[i] > (Hgamma + delta))) &
                    ((wave[i] < (OIIIc - delta)) or (wave[i] > (OIIIc + delta))) &
                    ((wave[i] < (HeIIa - delta)) or (wave[i] > (HeIIa + delta))) &
                    ((wave[i] < (HeIIb - delta)) or (wave[i] > (HeIIb + delta))) &
                    ((wave[i] < (SIII - delta)) or (wave[i] > (SIII + delta))) &
                    ((wave[i] < (OIa - delta)) or (wave[i] > (OIa + delta))) &
                    ((wave[i] < (OIb - delta)) or (wave[i] > (OIb + delta))) &
                    ((wave[i] < (ArIII - delta)) or (wave[i] > (ArIII + delta))))

    # mask for the wavelength for the emission lines
    # create an array with only the allowed values of the wavelength
    wavemask = []
    for i in range(0, len(maskHa)):
        if maskHa[i]:
            wavemask.append(wave[i])

    wave = np.array(wavemask)

    #Use this mask for the galaxy
    galaxymask = []
    for i in range(0, len(maskHa)):
        if maskHa[i]:
            galaxymask.append(galaxy[i])
        
    galaxy = np.array(galaxymask)
    
    # The noise level is chosen to give Chi^2/DOF=1 without regularization (REGUL=0)
    #
    #
    noise = galaxy*0 + 0.01528           # Assume constant noise per pixel here

    # The velocity step was already chosen by the SDSS pipeline
    # and we convert it below to km/s
    #
    c = 299792.458  # speed of light in km/s
    velscale = np.log(wave[1]/wave[0])*c
    FWHM_gal = 2.76  # SDSS has an instrumental resolution FWHM of 2.76A.

    stars_templates, lamRange_temp, logLam_temp = \
        setup_spectral_library(velscale, FWHM_gal)

    # The stellar templates are reshaped into a 2-dim array with each spectrum
    # as a column, however we save the original array dimensions, which are
    # needed to specify the regularization dimensions
    #
    reg_dim = stars_templates.shape[1:]
    stars_templates = stars_templates.reshape(stars_templates.shape[0], -1)

    # See the pPXF documentation for the keyword REGUL,
    # for an explanation of the following two lines
    #
    stars_templates /= np.median(stars_templates)  # Normalizes stellar templates by a scalar
    regul_err = 0.004  # Desired regularization error

    # Construct a set of Gaussian emission line templates
    #
    gas_templates = util.emission_lines(logLam_temp, FWHM_gal)

    # Combines the stellar and gaseous templates into a single array
    # during the PPXF fit they will be assigned a different kinematic
    # COMPONENT value
    #
    templates = np.hstack([stars_templates, gas_templates])

    # The galaxy and the template spectra do not have the same starting wavelength.
    # For this reason an extra velocity shift DV has to be applied to the template
    # to fit the galaxy spectrum. We remove this artificial shift by using the
    # keyword VSYST in the call to PPXF below, so that all velocities are
    # measured with respect to DV. This assume the redshift is negligible.
    # In the case of a high-redshift galaxy one should de-redshift its
    # wavelength to the rest frame before using the line below as described
    # in PPXF_KINEMATICS_EXAMPLE_SAURON.
    #
    z = 0  # redshift already corrected
    c = 299792.458
    dv = (np.log(lamRange_temp[0])-np.log(wave[0]))*c  # km/s
    vel = c*z  # Initial estimate of the galaxy velocity in km/s
    
    # Here the actual fit starts. The best fit is plotted on the screen.
    #
    # IMPORTANT: Ideally one would like not to use any polynomial in the fit
    # as the continuum shape contains important information on the population.
    # Unfortunately this is often not feasible, due to small calibration
    # uncertainties in the spectral shape. To avoid affecting the line strength of
    # the spectral features, we exclude additive polynomials (DEGREE=-1) and only use
    # multiplicative ones (MDEGREE=10). This is only recommended for population, not
    # for kinematic extraction, where additive polynomials are always recommended.
    #
    start = [vel, 180.]  # (km/s), starting guess for [V,sigma]

    t = clock()

    plt.clf()
    plt.subplot(211)

    # Assign component=0 to the stellar templates and
    # component=1 to the gas emission lines templates.
    # One can easily assign different components to different gas species
    # e.g. component=1 for the Balmer series, component=2 for the [OIII] doublet, ...)
    # Input a negative MOMENTS value to keep fixed the LOSVD of a component.
    #
    component = [0]*stars_templates.shape[1] + [1]*gas_templates.shape[1]
    moments = [4, 4]  # fit (V,sig,h3,h4) for both the stars and the gas
    start = [start, start]  # adopt the same starting value for both gas and stars

    pp = ppxf(file, templates, wave, galaxy, noise, velscale, start,
              plot=True, moments=moments, degree=-1, mdegree=10,
              vsyst=dv, clean=False, regul=1./regul_err,
              reg_dim=reg_dim, component=component)

    # When the two numbers below are the same, the solution is the smoothest
    # consistent with the observed spectrum.
    #
    print('Desired Delta Chi^2:', np.sqrt(2*galaxy.size))
    print('Current Delta Chi^2:', (pp.chi2 - 1)*galaxy.size)
    print('elapsed time in PPXF (s):', clock() - t)

    plt.subplot(212)
    #plt.set_cmap('gist_heat') # = IDL's loadct, 3
    plt.imshow(np.rot90(pp.weights[:np.prod(reg_dim)].reshape(reg_dim)/pp.weights.sum()),
               interpolation='nearest', aspect='auto', extent=(np.log(1.0),
               np.log(17.7828), -1.9, 0.45))
    plt.set_cmap('gist_heat')  # = IDL's loadct, 3
    plt.colorbar()
    plt.title("Mass Fraction")
    plt.xlabel("log Age (Gyr)")
    plt.ylabel("[M/H]")
    plt.tight_layout()

    # Save the figure
    name = splitext(basename(file))[0]
    plt.savefig(name)

    return
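
The velocity step used above follows directly from the log-rebinned SDSS grid: velscale = c * ln(lam[1]/lam[0]). A quick standalone check with a synthetic grid, assuming the SDSS log10 pixel step of 1e-4:

import numpy as np

c = 299792.458                                  # speed of light in km/s
wave = 3800.0 * 10 ** (1e-4 * np.arange(5))     # log-spaced wavelengths in Angstrom
velscale = np.log(wave[1] / wave[0]) * c
print(velscale)                                 # ~69 km/s per pixel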
Example #37
0
def do_cleanup(catalog):
    """Cleanup catalog after importing all data."""
    task_str = catalog.get_current_task_str()

    # Set preferred names, calculate some columns based on imported data,
    # sanitize some fields
    keys = list(catalog.entries.keys())

    cleanupcnt = 0
    for oname in pbar(keys, task_str):
        # Some events may be merged in cleanup process, skip them if
        # non-existent.
        try:
            name = catalog.add_entry(oname)
        except Exception:
            catalog.log.warning(
                '"{}" was not found, suggests merge occurred in cleanup '
                'process.'.format(oname))
            continue

        # Set the preferred name, switching to that name if name changed.
        name = catalog.entries[name].set_preferred_name()

        aliases = catalog.entries[name].get_aliases()
        catalog.entries[name].purge_bandless_photometry()
        catalog.entries[name].set_first_max_light()

        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['MLS', 'SSS', 'CSS', 'GRB ']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:2])):
                        discoverdate = ('/'.join([
                            '20' + alias.replace(prefix, '')[:2],
                            alias.replace(prefix, '')[2:4],
                            alias.replace(prefix, '')[4:6]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break
        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = [
                'ASASSN-', 'PS1-', 'PS1', 'PS', 'iPTF', 'PTF', 'SCP-', 'SNLS-',
                'SPIRITS', 'LSQ', 'DES', 'SNHiTS', 'Gaia', 'GND', 'GNW', 'GSD',
                'GSW', 'EGS', 'COS', 'OGLE', 'HST'
            ]
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:2])
                            and is_number(alias.replace(prefix, '')[:1])):
                        discoverdate = '20' + alias.replace(prefix, '')[:2]
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break
        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['SNF']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:4])):
                        discoverdate = ('/'.join([
                            alias.replace(prefix, '')[:4],
                            alias.replace(prefix, '')[4:6],
                            alias.replace(prefix, '')[6:8]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break
        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['PTFS', 'SNSDF']
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:2])):
                        discoverdate = ('/'.join([
                            '20' + alias.replace(prefix, '')[:2],
                            alias.replace(prefix, '')[2:4]
                        ]))
                        if catalog.args.verbose:
                            tprint('Added discoverdate from name [' + alias +
                                   ']: ' + discoverdate)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.DISCOVER_DATE,
                            discoverdate,
                            source,
                            derived=True)
                        break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break
        if SUPERNOVA.DISCOVER_DATE not in catalog.entries[name]:
            prefixes = ['AT', 'SN', 'OGLE-', 'SM ', 'KSN']
            for alias in aliases:
                for prefix in prefixes:
                    if alias.startswith(prefix):
                        year = re.findall(r'\d+', alias)
                        if len(year) == 1:
                            year = year[0]
                        else:
                            continue
                        if alias.replace(prefix, '').index(year) != 0:
                            continue
                        if (year and is_number(year) and '.' not in year
                                and len(year) <= 4):
                            discoverdate = year
                            if catalog.args.verbose:
                                tprint('Added discoverdate from name [' +
                                       alias + ']: ' + discoverdate)
                            source = catalog.entries[name].add_self_source()
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.DISCOVER_DATE,
                                discoverdate,
                                source,
                                derived=True)
                            break
                if SUPERNOVA.DISCOVER_DATE in catalog.entries[name]:
                    break

        if (SUPERNOVA.RA not in catalog.entries[name]
                or SUPERNOVA.DEC not in catalog.entries[name]):
            prefixes = [
                'PSN J', 'MASJ', 'CSS', 'SSS', 'MASTER OT J', 'HST J', 'TCP J',
                'MACS J', '2MASS J', 'EQ J', 'CRTS J', 'SMT J'
            ]
            for alias in aliases:
                for prefix in prefixes:
                    if (alias.startswith(prefix)
                            and is_number(alias.replace(prefix, '')[:6])):
                        noprefix = alias.split(':')[-1].replace(prefix,
                                                                '').replace(
                                                                    '.', '')
                        decsign = '+' if '+' in noprefix else '-'
                        noprefix = noprefix.replace('+', '|').replace('-', '|')
                        nops = noprefix.split('|')
                        if len(nops) < 2:
                            continue
                        rastr = nops[0]
                        decstr = nops[1]
                        ra = ':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) + \
                            ('.' + rastr[6:] if len(rastr) > 6 else '')
                        dec = (
                            decsign +
                            ':'.join([decstr[:2], decstr[2:4], decstr[4:6]]) +
                            ('.' + decstr[6:] if len(decstr) > 6 else ''))
                        if catalog.args.verbose:
                            tprint('Added ra/dec from name: ' + ra + ' ' + dec)
                        source = catalog.entries[name].add_self_source()
                        catalog.entries[name].add_quantity(SUPERNOVA.RA,
                                                           ra,
                                                           source,
                                                           derived=True)
                        catalog.entries[name].add_quantity(SUPERNOVA.DEC,
                                                           dec,
                                                           source,
                                                           derived=True)
                        break
                if SUPERNOVA.RA in catalog.entries[name]:
                    break

        no_host = (SUPERNOVA.HOST not in catalog.entries[name] or not any([
            x[QUANTITY.VALUE] == 'Milky Way'
            for x in catalog.entries[name][SUPERNOVA.HOST]
        ]))
        if (SUPERNOVA.RA in catalog.entries[name]
                and SUPERNOVA.DEC in catalog.entries[name] and no_host):
            from astroquery.irsa_dust import IrsaDust
            if name not in catalog.extinctions_dict:
                try:
                    ra_dec = catalog.entries[name][
                        SUPERNOVA.RA][0][QUANTITY.VALUE] + \
                        " " + \
                        catalog.entries[name][SUPERNOVA.DEC][0][QUANTITY.VALUE]
                    result = IrsaDust.get_query_table(ra_dec, section='ebv')
                except (KeyboardInterrupt, SystemExit):
                    raise
                except Exception:
                    warnings.warn("Coordinate lookup for " + name +
                                  " failed in IRSA.")
                else:
                    ebv = result['ext SandF mean'][0]
                    ebverr = result['ext SandF std'][0]
                    catalog.extinctions_dict[name] = [ebv, ebverr]
            if name in catalog.extinctions_dict:
                sources = uniq_cdl([
                    catalog.entries[name].add_self_source(),
                    catalog.entries[name].add_source(
                        bibcode='2011ApJ...737..103S')
                ])
                (catalog.entries[name].add_quantity(
                    SUPERNOVA.EBV,
                    str(catalog.extinctions_dict[name][0]),
                    sources,
                    e_value=str(catalog.extinctions_dict[name][1]),
                    derived=True))
        if ((SUPERNOVA.HOST in catalog.entries[name]
             and (SUPERNOVA.HOST_RA not in catalog.entries[name]
                  or SUPERNOVA.HOST_DEC not in catalog.entries[name]))):
            for host in catalog.entries[name][SUPERNOVA.HOST]:
                alias = host[QUANTITY.VALUE]
                if ' J' in alias and is_number(alias.split(' J')[-1][:6]):
                    noprefix = alias.split(' J')[-1].split(':')[-1].replace(
                        '.', '')
                    decsign = '+' if '+' in noprefix else '-'
                    noprefix = noprefix.replace('+', '|').replace('-', '|')
                    nops = noprefix.split('|')
                    if len(nops) < 2:
                        continue
                    rastr = nops[0]
                    decstr = nops[1]
                    hostra = (':'.join([rastr[:2], rastr[2:4], rastr[4:6]]) +
                              ('.' + rastr[6:] if len(rastr) > 6 else ''))
                    hostdec = decsign + ':'.join([
                        decstr[:2], decstr[2:4], decstr[4:6]
                    ]) + ('.' + decstr[6:] if len(decstr) > 6 else '')
                    if catalog.args.verbose:
                        tprint('Added hostra/hostdec from name: ' + hostra +
                               ' ' + hostdec)
                    source = catalog.entries[name].add_self_source()
                    catalog.entries[name].add_quantity(SUPERNOVA.HOST_RA,
                                                       hostra,
                                                       source,
                                                       derived=True)
                    catalog.entries[name].add_quantity(SUPERNOVA.HOST_DEC,
                                                       hostdec,
                                                       source,
                                                       derived=True)
                    break
                if SUPERNOVA.HOST_RA in catalog.entries[name]:
                    break

        if (SUPERNOVA.REDSHIFT not in catalog.entries[name]
                and SUPERNOVA.VELOCITY in catalog.entries[name]):
            # Find the "best" velocity to use for this
            bestsig = 0
            for hv in catalog.entries[name][SUPERNOVA.VELOCITY]:
                sig = get_sig_digits(hv[QUANTITY.VALUE])
                if sig > bestsig:
                    besthv = hv[QUANTITY.VALUE]
                    bestsrc = hv['source']
                    bestsig = sig
            if bestsig > 0 and is_number(besthv):
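                # Relativistic Doppler shift: 1 + z = sqrt((1 + beta) / (1 - beta)),
                # where beta = v/c (velocity converted to the units of CLIGHT).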
                voc = float(besthv) * 1.e5 / CLIGHT
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                (catalog.entries[name].add_quantity(
                    SUPERNOVA.REDSHIFT,
                    pretty_num(sqrt((1. + voc) / (1. - voc)) - 1.,
                               sig=bestsig),
                    sources,
                    kind='heliocentric',
                    derived=True))
        if (SUPERNOVA.REDSHIFT not in catalog.entries[name]
                and len(catalog.nedd_dict) > 0
                and SUPERNOVA.HOST in catalog.entries[name]):
            reference = "NED-D"
            refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
            refbib = "1991ASSL..171...89H"
            for host in catalog.entries[name][SUPERNOVA.HOST]:
                if host[QUANTITY.VALUE] in catalog.nedd_dict:
                    source = catalog.entries[name].add_source(
                        bibcode='2016A&A...594A..13P')
                    secondarysource = catalog.entries[name].add_source(
                        name=reference,
                        url=refurl,
                        bibcode=refbib,
                        secondary=True)
                    meddist = statistics.median(
                        catalog.nedd_dict[host[QUANTITY.VALUE]])
                    redz = z_at_value(cosmo.comoving_distance,
                                      float(meddist) * un.Mpc)
                    redshift = pretty_num(redz,
                                          sig=get_sig_digits(str(meddist)))
                    catalog.entries[name].add_quantity(
                        [SUPERNOVA.REDSHIFT, SUPERNOVA.HOST_REDSHIFT],
                        redshift,
                        uniq_cdl([source, secondarysource]),
                        kind='host',
                        derived=True)
        if (SUPERNOVA.MAX_ABS_MAG not in catalog.entries[name]
                and SUPERNOVA.MAX_APP_MAG in catalog.entries[name]
                and SUPERNOVA.LUM_DIST in catalog.entries[name]):
            # Find the "best" distance to use for this
            bestsig = 0
            for ld in catalog.entries[name][SUPERNOVA.LUM_DIST]:
                sig = get_sig_digits(ld[QUANTITY.VALUE])
                if sig > bestsig:
                    bestld = ld[QUANTITY.VALUE]
                    bestsrc = ld[QUANTITY.SOURCE]
                    bestsig = sig
            if bestsig > 0 and is_number(bestld) and float(bestld) > 0.:
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                bestldz = z_at_value(cosmo.luminosity_distance,
                                     float(bestld) * un.Mpc)
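                # Absolute magnitude = apparent magnitude - distance modulus,
                # plus a 2.5*log10(1+z) term for the redshifted bandpass.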
                pnum = (float(catalog.entries[name][SUPERNOVA.MAX_APP_MAG][0][
                    QUANTITY.VALUE]) - 5.0 *
                        (log10(float(bestld) * 1.0e6) - 1.0) +
                        2.5 * log10(1.0 + bestldz))
                pnum = pretty_num(pnum, sig=bestsig + 1)
                catalog.entries[name].add_quantity(SUPERNOVA.MAX_ABS_MAG,
                                                   pnum,
                                                   sources,
                                                   derived=True)
        if (SUPERNOVA.MAX_VISUAL_ABS_MAG not in catalog.entries[name]
                and SUPERNOVA.MAX_VISUAL_APP_MAG in catalog.entries[name]
                and SUPERNOVA.LUM_DIST in catalog.entries[name]):
            # Find the "best" distance to use for this
            bestsig = 0
            for ld in catalog.entries[name][SUPERNOVA.LUM_DIST]:
                sig = get_sig_digits(ld[QUANTITY.VALUE])
                if sig > bestsig:
                    bestld = ld[QUANTITY.VALUE]
                    bestsrc = ld[QUANTITY.SOURCE]
                    bestsig = sig
            if bestsig > 0 and is_number(bestld) and float(bestld) > 0.:
                source = catalog.entries[name].add_self_source()
                sources = uniq_cdl([source] + bestsrc.split(','))
                # Visual absolute magnitude: apparent magnitude minus the
                # distance modulus 5 * (log10(d / pc) - 1), with no
                # (1 + z) bandpass term (cf. the bolometric case above).
                pnum = (float(catalog.entries[name][
                    SUPERNOVA.MAX_VISUAL_APP_MAG][0][QUANTITY.VALUE]) - 5.0 *
                        (log10(float(bestld) * 1.0e6) - 1.0))
                pnum = pretty_num(pnum, sig=bestsig + 1)
                catalog.entries[name].add_quantity(
                    SUPERNOVA.MAX_VISUAL_ABS_MAG, pnum, sources, derived=True)
        if SUPERNOVA.REDSHIFT in catalog.entries[name]:
            # Find the "best" redshift to use for this
            bestz, bestkind, bestsig, bestsrc = catalog.entries[
                name].get_best_redshift()
            if bestsig > 0:
                try:
                    bestz = float(bestz)
                except Exception:
                    print(catalog.entries[name])
                    raise
                if SUPERNOVA.VELOCITY not in catalog.entries[name]:
                    source = catalog.entries[name].add_self_source()
                    # Relativistic Doppler: v/c = ((1+z)^2 - 1) / ((1+z)^2 + 1),
                    # converted to km/s via CLIGHT / KM.
                    pnum = CLIGHT / KM * \
                        ((bestz + 1.)**2. - 1.) / ((bestz + 1.)**2. + 1.)
                    pnum = pretty_num(pnum, sig=bestsig)
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.VELOCITY,
                        pnum,
                        source,
                        kind=(SUPERNOVA.VELOCITY.kind_preference[bestkind]
                              if bestkind else ''))
                if bestz > 0.:
                    if SUPERNOVA.LUM_DIST not in catalog.entries[name]:
                        dl = cosmo.luminosity_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name].add_source(
                                bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.LUM_DIST,
                            pretty_num(dl.value, sig=bestsig + 1),
                            sources,
                            kind=(SUPERNOVA.LUM_DIST.kind_preference[bestkind]
                                  if bestkind else ''),
                            derived=True)
                        if (SUPERNOVA.MAX_ABS_MAG not in catalog.entries[name]
                                and SUPERNOVA.MAX_APP_MAG
                                in catalog.entries[name]):
                            source = catalog.entries[name].add_self_source()
                            pnum = pretty_num(
                                float(catalog.entries[name][
                                    SUPERNOVA.MAX_APP_MAG][0][QUANTITY.VALUE])
                                - 5.0 * (log10(dl.to('pc').value) - 1.0) +
                                2.5 * log10(1.0 + bestz),
                                sig=bestsig + 1)
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.MAX_ABS_MAG,
                                pnum,
                                sources,
                                derived=True)
                        if (SUPERNOVA.MAX_VISUAL_ABS_MAG
                                not in catalog.entries[name]
                                and SUPERNOVA.MAX_VISUAL_APP_MAG
                                in catalog.entries[name]):
                            source = catalog.entries[name].add_self_source()
                            pnum = pretty_num(float(catalog.entries[name][
                                SUPERNOVA.MAX_VISUAL_APP_MAG][0][
                                    QUANTITY.VALUE]) - 5.0 *
                                              (log10(dl.to('pc').value) - 1.0),
                                              sig=bestsig + 1)
                            catalog.entries[name].add_quantity(
                                SUPERNOVA.MAX_VISUAL_ABS_MAG,
                                pnum,
                                sources,
                                derived=True)
                    if SUPERNOVA.COMOVING_DIST not in catalog.entries[name]:
                        cd = cosmo.comoving_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name].add_source(
                                bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.COMOVING_DIST,
                            pretty_num(cd.value, sig=bestsig),
                            sources,
                            derived=True)
        if SUPERNOVA.HOST_REDSHIFT in catalog.entries[name]:
            # Find the "best" redshift to use for this
            bestz, bestkind, bestsig, bestsrc = catalog.entries[
                name].get_best_redshift(SUPERNOVA.HOST_REDSHIFT)
            if bestsig > 0:
                try:
                    bestz = float(bestz)
                except Exception:
                    print(catalog.entries[name])
                    raise
                if SUPERNOVA.HOST_VELOCITY not in catalog.entries[name]:
                    source = catalog.entries[name].add_self_source()
                    # Relativistic Doppler: v/c = ((1+z)^2 - 1) / ((1+z)^2 + 1),
                    # converted to km/s via CLIGHT / KM.
                    pnum = CLIGHT / KM * \
                        ((bestz + 1.)**2. - 1.) / ((bestz + 1.)**2. + 1.)
                    pnum = pretty_num(pnum, sig=bestsig)
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.HOST_VELOCITY,
                        pnum,
                        source,
                        kind=(SUPERNOVA.HOST_VELOCITY.kind_preference[bestkind]
                              if bestkind else ''))
                if bestz > 0.:
                    if SUPERNOVA.HOST_LUM_DIST not in catalog.entries[name]:
                        dl = cosmo.luminosity_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name].add_source(
                                bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.HOST_LUM_DIST,
                            pretty_num(dl.value, sig=bestsig + 1),
                            sources,
                            kind=(SUPERNOVA.HOST_LUM_DIST.
                                  kind_preference[bestkind]
                                  if bestkind else ''),
                            derived=True)
                    if SUPERNOVA.HOST_COMOVING_DIST not in catalog.entries[
                            name]:
                        cd = cosmo.comoving_distance(bestz)
                        sources = [
                            catalog.entries[name].add_self_source(),
                            catalog.entries[name].add_source(
                                bibcode='2016A&A...594A..13P')
                        ]
                        sources = uniq_cdl(sources + bestsrc.split(','))
                        catalog.entries[name].add_quantity(
                            SUPERNOVA.HOST_COMOVING_DIST,
                            pretty_num(cd.value, sig=bestsig),
                            sources,
                            derived=True)
        if all([
                x in catalog.entries[name] for x in [
                    SUPERNOVA.RA, SUPERNOVA.DEC, SUPERNOVA.HOST_RA,
                    SUPERNOVA.HOST_DEC
                ]
        ]):
            # For now just using first coordinates that appear in entry
            try:
                c1 = coord(
                    ra=catalog.entries[name][SUPERNOVA.RA][0][QUANTITY.VALUE],
                    dec=catalog.entries[name][SUPERNOVA.DEC][0][
                        QUANTITY.VALUE],
                    unit=(un.hourangle, un.deg))
                c2 = coord(ra=catalog.entries[name][SUPERNOVA.HOST_RA][0][
                    QUANTITY.VALUE],
                           dec=catalog.entries[name][SUPERNOVA.HOST_DEC][0][
                               QUANTITY.VALUE],
                           unit=(un.hourangle, un.deg))
            except (KeyboardInterrupt, SystemExit):
                raise
            except Exception:
                pass
            else:
                sources = uniq_cdl([catalog.entries[name].add_self_source()] +
                                   catalog.entries[name][SUPERNOVA.RA][0][
                                       QUANTITY.SOURCE].split(',') +
                                   catalog.entries[name][SUPERNOVA.DEC][0][
                                       QUANTITY.SOURCE].split(',') +
                                   catalog.entries[name][SUPERNOVA.HOST_RA][0][
                                       QUANTITY.SOURCE].split(',') +
                                   catalog.entries[name][SUPERNOVA.HOST_DEC][0]
                                   [QUANTITY.SOURCE].split(','))
                if SUPERNOVA.HOST_OFFSET_ANG not in catalog.entries[name]:
                    hosa = Decimal(c1.separation(c2).arcsecond)
                    hosa = pretty_num(hosa)
                    catalog.entries[name].add_quantity(
                        SUPERNOVA.HOST_OFFSET_ANG,
                        hosa,
                        sources,
                        derived=True,
                        u_value='arcseconds')
                if (SUPERNOVA.COMOVING_DIST in catalog.entries[name]
                        and SUPERNOVA.REDSHIFT in catalog.entries[name]
                        and SUPERNOVA.HOST_OFFSET_DIST
                        not in catalog.entries[name]):
                    offsetsig = get_sig_digits(catalog.entries[name][
                        SUPERNOVA.HOST_OFFSET_ANG][0][QUANTITY.VALUE])
                    sources = uniq_cdl(
                        sources.split(',') +
                        (catalog.entries[name][SUPERNOVA.COMOVING_DIST][0][
                            QUANTITY.SOURCE]).split(',') +
                        (catalog.entries[name][SUPERNOVA.REDSHIFT][0][
                            QUANTITY.SOURCE]).split(','))
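                    # Projected offset in kpc: angle in radians times the
                    # angular-diameter distance, D_C / (1 + z).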
                    (catalog.entries[name].add_quantity(
                        SUPERNOVA.HOST_OFFSET_DIST,
                        pretty_num(
                            float(catalog.entries[name][
                                SUPERNOVA.HOST_OFFSET_ANG][0][QUANTITY.VALUE])
                            / 3600. * (pi / 180.) *
                            float(catalog.entries[name][
                                SUPERNOVA.COMOVING_DIST][0][QUANTITY.VALUE]) *
                            1000. / (1.0 + float(catalog.entries[name][
                                SUPERNOVA.REDSHIFT][0][QUANTITY.VALUE])),
                            sig=offsetsig), sources))

        catalog.entries[name].sanitize()
        catalog.journal_entries(bury=True, final=True, gz=True)
        cleanupcnt = cleanupcnt + 1
        if catalog.args.travis and cleanupcnt % 1000 == 0:
            break

    catalog.save_caches()

    return
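
A minimal standalone sketch of the two relativistic Doppler conversions this
cleanup pass applies (plain Python; C_KM_S stands in for the CLIGHT and KM
constants used above):

from math import sqrt

C_KM_S = 299792.458  # speed of light in km/s

def z_from_velocity(v_km_s):
    """Heliocentric velocity (km/s) -> redshift, as derived above."""
    beta = v_km_s / C_KM_S
    return sqrt((1. + beta) / (1. - beta)) - 1.

def velocity_from_z(z):
    """Redshift -> velocity (km/s), the inverse relation used above."""
    return C_KM_S * ((z + 1.)**2 - 1.) / ((z + 1.)**2 + 1.)

# Round trip: ~3000 km/s corresponds to z of roughly 0.01
assert abs(velocity_from_z(z_from_velocity(3000.)) - 3000.) < 1e-6
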
Example #38
0
                    if plx != 'NULL':
                        # Parallax already taken from Gaia DR2
                        p = plx
                        perr = eplx
                        pflag = 'GAIADR2'
                    elif type(result['PLX_VALUE'][indr]) != np.ma.core.MaskedConstant:
                        # Fall back on the Simbad parallax when unmasked
                        p = round(float(result['PLX_VALUE'][indr]), 2)
                        if type(result['PLX_ERROR'][indr]) != np.ma.core.MaskedConstant:
                            perr = round(float(result['PLX_ERROR'][indr]), 2)
                        else:
                            perr = empty
                        pflag = 'Simbad'
                    else:
                        # Last resort: spectroscopic parallax, with the
                        # extinction from the IRSA dust map (Schlafly &
                        # Finkbeiner 2011, ApJ 737, 103)
                        try:
                            pos = coord.SkyCoord(ra=ra, dec=dec,
                                                 unit=(u.hourangle, u.deg),
                                                 frame='icrs')
                            tableAv = IrsaDust.get_query_table(
                                pos, radius='02d', section='ebv', timeout=60)
                            Av = tableAv['ext SandF mean'].data[0]
                            Averr = tableAv['ext SandF std'].data[0]
                        except Exception:
                            Av = 0
                            Averr = 0
                        try:
                            p, perr = map(
                                lambda x: round(x, 2),
                                parallax(Teff, Tefferr, float(logg),
                                         float(loggerr), V, Verr, M, Merr,
                                         Av, Averr))
                            pflag = 'Spec'
                        except Exception:
                            p = 'NULL'
                            perr = 'NULL'
                            pflag = 'NULL'

                    # Comments
                    if result['SP_TYPE'][indr] != '' and result['SP_TYPE'][indr][0] == 'M':
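
The dust-map fallback in the fragment above can be exercised on its own; a
minimal sketch, assuming only astroquery and astropy are installed (the
coordinate is an arbitrary example; 'ext SandF mean' is the Schlafly &
Finkbeiner 2011 E(B-V)):

import astropy.coordinates as coord
import astropy.units as u
from astroquery.irsa_dust import IrsaDust

pos = coord.SkyCoord(ra=83.82, dec=-5.39, unit=(u.deg, u.deg), frame='icrs')
tableAv = IrsaDust.get_query_table(pos, radius='02d', section='ebv',
                                   timeout=60)
ebv = tableAv['ext SandF mean'].data[0]
ebverr = tableAv['ext SandF std'].data[0]
print(ebv, ebverr)
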
Example #39
0
def read_lasair_json(object_name='ZTF18acsovsw'):
    """
    Read light curve from lasair website API based on object name.

    Parameters
    ----------
    object_name : str
        The LASAIR object name. E.g. object_name='ZTF18acsovsw'

    """
    print(object_name)
    if isinstance(object_name, tuple):
        object_name, z_in = object_name
    else:
        z_in = None

    url = 'https://lasair.roe.ac.uk/object/{}/json/'.format(object_name)

    data = read_json(url)

    objid = data['objectId']
    ra = data['objectData']['ramean']
    dec = data['objectData']['decmean']
    # lasair_classification = data['objectData']['classification']
    tns_info = data['objectData']['annotation']
    photoZ = None
    for cross_match in data['crossmatches']:
        # print(cross_match)
        photoZ = cross_match['photoZ']
        separation_arcsec = cross_match['separationArcsec']
        catalogue_object_type = cross_match['catalogue_object_type']
    if z_in is not None and not np.isnan(z_in):
        redshift = z_in
    else:
        if photoZ is None:  # TODO: Get correct redshift
            # Try to parse a redshift out of the TNS annotation string
            try:
                if "z=" in tns_info:
                    photoZ = tns_info.split('z=')[1]
                    redshift = float(photoZ.replace(')', '').split()[0])
                elif "Z=" in tns_info:
                    photoZ = tns_info.split('Z=')[1]
                    redshift = float(photoZ.split()[0])
                else:
                    print("No redshift found in TNS annotation")
                    redshift = None
            except Exception as e:
                redshift = None
                print(e)
        else:
            redshift = photoZ

    print("Redshift is {}".format(redshift))
    if redshift is not None:
        objid += "_z={}".format(round(redshift, 2))

    # Get extinction  TODO: Maybe add this to RAPID code
    coo = coord.SkyCoord(ra * u.deg, dec * u.deg, frame='icrs')
    dust = IrsaDust.get_query_table(coo, section='ebv')
    mwebv = dust['ext SandF mean'][0]
    print("MWEBV")
    print(mwebv)

    mjd = []
    passband = []
    mag = []
    magerr = []
    photflag = []
    zeropoint = []
    dc_mag = []
    dc_magerr = []
    magnr, sigmagnr, isdiffpos = [], [], []
    for cand in data['candidates']:
        mjd.append(cand['mjd'])
        passband.append(cand['fid'])
        mag.append(cand['magpsf'])
        if 'sigmapsf' in cand:
            magerr.append(cand['sigmapsf'])
            photflag.append(4096)

            # TODO: Tell LASAIR their zeropoints are wrong
            if cand['magzpsci'] == 0:
                print(object_name, zeropoint)
                raise Exception("magzpsci is zero for " + object_name)

            zeropoint.append(cand['magzpsci'])
            dc_mag.append(cand['dc_mag'])
            dc_magerr.append(cand['dc_sigmag'])
            magnr.append(cand['magnr'])
            sigmagnr.append(cand['sigmagnr'])
            isdiffpos.append(cand['isdiffpos'])
        else:
            # Nondetection: no PSF-fit magnitude uncertainty available
            magerr.append(np.nan)
            photflag.append(0)
            zeropoint.append(np.nan)
            dc_mag.append(np.nan)
            dc_magerr.append(np.nan)
            magnr.append(np.nan)
            sigmagnr.append(np.nan)
            isdiffpos.append(None)

    mjd, passband, mag, magerr, photflag, zeropoint, dc_mag, dc_magerr, magnr, sigmagnr, isdiffpos = convert_lists_to_arrays(
        mjd, passband, mag, magerr, photflag, zeropoint, dc_mag, dc_magerr,
        magnr, sigmagnr, isdiffpos)

    # Delete nondetections that occur after the first detection
    deleteindexes = np.where(
        (photflag == 0) & (mjd > min(mjd[photflag > 0])))
    mjd, passband, mag, magerr, photflag, zeropoint, dc_mag, dc_magerr, magnr, sigmagnr, isdiffpos = delete_indexes(
        deleteindexes, mjd, passband, mag, magerr, photflag, zeropoint, dc_mag,
        dc_magerr, magnr, sigmagnr, isdiffpos)
    # Remove nondetection outliers (limits much brighter than typical)
    deleteindexes = np.where(
        (mag <
         (np.median(mag[photflag == 0]) - 0.5 * np.std(mag[photflag == 0]))) &
        (photflag == 0))
    mjd, passband, mag, magerr, photflag, zeropoint, dc_mag, dc_magerr, magnr, sigmagnr, isdiffpos = delete_indexes(
        deleteindexes, mjd, passband, mag, magerr, photflag, zeropoint, dc_mag,
        dc_magerr, magnr, sigmagnr, isdiffpos)

    return mjd, passband, mag, magerr, photflag, zeropoint, ra, dec, objid, redshift, mwebv, dc_mag, dc_magerr, magnr, sigmagnr, isdiffpos
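
A hypothetical driver for the function above (the object name is the
docstring's example; the plotting choices and the fid -> band mapping of
1 = ZTF g, 2 = ZTF r are illustrative assumptions):

import matplotlib.pyplot as plt

(mjd, passband, mag, magerr, photflag, zeropoint, ra, dec, objid, redshift,
 mwebv, dc_mag, dc_magerr, magnr, sigmagnr, isdiffpos) = read_lasair_json(
     'ZTF18acsovsw')

det = photflag == 4096  # keep detections only
for fid, label in [(1, 'g'), (2, 'r')]:
    sel = det & (passband == fid)
    plt.errorbar(mjd[sel], mag[sel], yerr=magerr[sel], fmt='o', label=label)
plt.gca().invert_yaxis()  # smaller magnitude = brighter
plt.xlabel('MJD')
plt.ylabel('magnitude')
plt.legend()
plt.title(objid)
plt.show()
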
Example #40
0
def read_lasair_json(object_name='ZTF18acsovsw'):
    """
    Read light curve from lasair website API based on object name.

    Parameters
    ----------
    object_name : str
        The LASAIR object name. E.g. object_name='ZTF18acsovsw'

    """
    print(object_name)
    if isinstance(object_name, tuple):
        object_name, z_in = object_name
    else:
        z_in = None

    url = 'https://lasair.roe.ac.uk/object/{}/json/'.format(object_name)

    data = read_json(url)

    objid = data['objectId']
    ra = data['objectData']['ramean']
    dec = data['objectData']['decmean']
    # lasair_classification = data['objectData']['classification']
    tns_info = data['objectData']['annotation']
    photoZ = None
    for cross_match in data['crossmatches']:
        # print(cross_match)
        photoZ = cross_match['photoZ']
        separation_arcsec = cross_match['separationArcsec']
        catalogue_object_type = cross_match['catalogue_object_type']
    if photoZ is None:  # TODO: Get correct redshift
        try:
            if "z=" in tns_info:
                photoZ = tns_info.split('z=')[1]
                redshift = float(photoZ.replace(')', '').split()[0])
            elif "Z=" in tns_info:
                photoZ = tns_info.split('Z=')[1]
                redshift = float(photoZ.split()[0])
            else:
                redshift = None
        except Exception as e:
            redshift = None
            print(e)
    else:
        redshift = photoZ
    if z_in is not None:
        redshift = z_in
    print("Redshift is {}".format(redshift))
    if redshift is not None:
        objid += "_z={}".format(round(redshift, 2))

    # Get extinction  TODO: Maybe add this to RAPID code
    coo = coord.SkyCoord(ra * u.deg, dec * u.deg, frame='icrs')
    dust = IrsaDust.get_query_table(coo, section='ebv')
    mwebv = dust['ext SandF mean'][0]
    print("MWEBV")
    print(mwebv)

    mjd = []
    passband = []
    mag = []
    magerr = []
    photflag = []
    zeropoint = []
    for cand in data['candidates']:
        mjd.append(cand['mjd'])
        passband.append(cand['fid'])
        mag.append(cand['magpsf'])
        if 'sigmapsf' in cand:
            magerr.append(cand['sigmapsf'])
            photflag.append(4096)
            if cand['magzpsci'] == 0:
                zeropoint.append(26.2)  # TODO: Tell LASAIR their zeropoints are wrong
            else:
                zeropoint.append(cand['magzpsci'])
        else:
            magerr.append(0.1 * cand['magpsf'])
            photflag.append(0)
            zeropoint.append(26.2)

    mjd, passband, mag, magerr, photflag, zeropoint = convert_lists_to_arrays(mjd, passband, mag, magerr, photflag, zeropoint)

    deleteindexes = np.where(magerr == None)
    mjd, passband, mag, magerr, photflag, zeropoint = delete_indexes(deleteindexes, mjd, passband, mag, magerr, photflag, zeropoint)

    return mjd, passband, mag, magerr, photflag, zeropoint, ra, dec, objid, redshift, mwebv
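
Both versions return the Milky Way E(B-V) (mwebv) but leave the dereddening
to the caller. A sketch of applying it, using Schlafly & Finkbeiner (2011)
SDSS g/r coefficients as stand-ins for the ZTF bands (an assumption to verify
before real use):

import numpy as np

# A_lambda / E(B-V) for R_V = 3.1 (Schlafly & Finkbeiner 2011, SDSS g and r),
# used here as approximations for ZTF fid 1 (g) and fid 2 (r).
R_LAMBDA = {1: 3.303, 2: 2.285}

def deredden_mags(mag, passband, mwebv):
    """Subtract the Galactic extinction from observed magnitudes."""
    a_lambda = np.array([R_LAMBDA[fid] * mwebv for fid in passband])
    return np.asarray(mag) - a_lambda

print(deredden_mags([18.5, 18.1], [1, 2], mwebv=0.03))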