Example 1
    def __init__(self, name, ra=None, dec=None, redshift=None, classification=None,
                 sfd_dir=None, **kwargs):
        """
        """
        kwargs = self._load_config_(**kwargs)

        self.name = name
        self.redshift = redshift
        self.classification = classification
        self.sfd_dir = sfd_dir
        self.filter_dict = kwargs.pop('filter_dict', _DEFAULT_FILTERS)
        
        if ra is not None and dec is not None:
            self.ra = ra
            self.dec = dec
            if _HAS_SFDMAP:
                if self.sfd_dir is None:
                    self.dustmap = sfdmap.SFDMap()
                else:
                    self.dustmap = sfdmap.SFDMap(self.sfd_dir)
                self.mwebv = self.dustmap.ebv(ra, dec)
            else:
                self.mwebv = 0.0
            
        # self.user / self.passwd are assumed to be populated by _load_config_()
        r = requests.post('http://skipper.caltech.edu:8080/cgi-bin/growth/print_lc.cgi',
                          auth=(self.user, self.passwd),
                          data={'name': self.name})
        r = r.text.split('<table border=0 width=850>')[-1]
        r = r.replace(' ', '').replace('\n', '')
        r = '\n'.join(r.split('<br>'))

        self.table_orig = Table.read(r, format='ascii.csv')
        self._remove_duplicates_()
Example 2
    @property
    def dustmap(self):
        """Instance of the SFD98 dust map, constructed lazily on first access."""
        if self._dustmap is not None:
            return self._dustmap
        elif _HAS_SFDMAP:
            if self.sfd_dir is None:
                if os.environ.get('SFD_DIR') is not None:
                    self._dustmap = sfdmap.SFDMap()
                else:
                    return None
            else:
                self._dustmap = sfdmap.SFDMap(self.sfd_dir)
            return self._dustmap
        else:
            return None
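
For reference, a minimal self-contained sketch of the lazy-construction pattern above (the Target class is hypothetical, and the _HAS_SFDMAP guard is dropped by assuming sfdmap imports cleanly):

import os
import sfdmap

class Target:
    def __init__(self, sfd_dir=None):
        self.sfd_dir = sfd_dir
        self._dustmap = None  # constructed on first access

    @property
    def dustmap(self):
        # Prefer an explicit directory; otherwise fall back to the
        # SFD_DIR environment variable honored by sfdmap.SFDMap().
        if self._dustmap is None:
            if self.sfd_dir is not None:
                self._dustmap = sfdmap.SFDMap(self.sfd_dir)
            elif os.environ.get('SFD_DIR') is not None:
                self._dustmap = sfdmap.SFDMap()
        return self._dustmap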
Example 3
def calcSpecDF(filenames, grid, gs, data, specobjid):
    # Build an (n_spectra, n_grid) array of dereddened, de-redshifted,
    # regridded, median-filtered and normalized spectra.
    specDF = np.zeros((len(filenames), len(grid)))
    ebvMap = sfdmap.SFDMap(r'E:\DB\sfddata-master')  # raw string for the Windows path

    for i in range(len(filenames)):
        print('Calculating spectrum:', end=' ')
        gi = gs.iloc[i]
        wli = 10**data[i]['loglam']
        fluxi = data[i]['flux']
        ivari = data[i]['ivar']
        assert specobjid[i] == gi[
            'specobjid'], 'Files do not match galaxies dataframe'
        ra = gi['ra']
        dec = gi['dec']
        ebvi = ebvMap.ebv(ra, dec)
        speci = deredden_spectrum(wli, fluxi, ebvi)
        speci = remove_bad_pixels(speci, ivari)
        speci = zero_to_nan(speci)
        wli = de_redshift(wli, gi['z'])
        speci = same_grid_single(grid, wli, speci)
        speci = medfilt(speci)
        speci = norm_spectrum(speci)
        specDF[i] = speci
        print(str(round(100 * (i + 1) / len(filenames), 3)) + '%')
    return specDF
Example 4
def extinction(ra, dec):
    dustmap = sfdmap.SFDMap(
        "/Users/bhagyasubrayan/Desktop/sncosmo/sfddata-master")
    # ra and dec are sexagesimal strings; their concatenation is parsed by
    # SkyCoord as (hourangle, degrees).
    c = SkyCoord(ra + dec, unit=(u.hourangle, u.deg))
    ebv = dustmap.ebv(c)
    # A/E(B-V) coefficients for SDSS g, r, i, z from Schlegel, Finkbeiner &
    # Davis (1998), Table 6.
    Ar = 2.751 * ebv
    Ag = 3.793 * ebv
    Ai = 2.086 * ebv
    Az = 1.479 * ebv
    return Ag, Ar, Ai, Az, ebv
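
A hedged usage sketch (the coordinate strings are illustrative placeholders; the hard-coded map directory above would also need to point at a local copy of the sfddata files):

# RA in hours, Dec in degrees; the trailing space in ra lets the
# concatenated string parse cleanly as (hourangle, degrees).
Ag, Ar, Ai, Az, ebv = extinction('05h23m34.5s ', '-69d45m22s')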
Example 5
    def reduce_mw_extinct(self):
        '''
        Correct for Milky Way (Galactic) extinction.
        '''
        wav = self.wavelen_list
        flux = self.flux_list
        flux_err = self.flux_e_list
        m = sfdmap.SFDMap('./sfddata-master')
        ebv = m.ebv(self.get_ra(), self.get_dec())
        R_V = 3.1
        A_V = ebv * R_V
        # ccm89 (Cardelli, Clayton & Mathis 1989; from the extinction
        # package) expects wavelengths in Angstroms as a float array.
        A_lam = ccm89(wav, A_V, R_V)
        fac = 10**(-0.4 * A_lam)
        # dividing by the attenuation factor recovers the intrinsic flux
        flux_deredden = flux / fac
        flux_err_deredden = flux_err / fac
        self.flux_list = flux_deredden
        self.flux_e_list = flux_err_deredden
        return self
Example 6
import sfdmap
from numpy import linspace, meshgrid, pi
import matplotlib.pyplot as plt

degrad = 180.0 / pi  # conversion factor: radians -> degrees


def add_sfd():
    m = sfdmap.SFDMap("sfddata-master")  # the sfddata files must be downloaded separately

    x1d = linspace(-pi, pi, 400)
    y1d = linspace(-pi / 2., pi / 2, 200)
    xvals, yvals = meshgrid(x1d, y1d)
    zvals = xvals * 0.
    for i in range(len(x1d)):
        for j in range(len(y1d)):
            zvals[j, i] = m.ebv(x1d[i] * degrad, y1d[j] * degrad)

    CS = plt.contourf(-xvals,
                      yvals,
                      zvals,
                      levels=[0, 0.01, 0.02, 0.03],
                      colors=[(1, 0, 0), (1, 0.5, 0.5), (1, 0.75, 0.75),
                              (1., 0.9, 0.9)],
                      alpha=0.3)
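
One caveat worth noting: SFDMap.ebv() interprets bare coordinates as ICRS RA/Dec in degrees by default, so if the grid above is meant to be Galactic longitude/latitude, the lookup inside the loop would need the frame keyword that sfdmap supports:

            zvals[j, i] = m.ebv(x1d[i] * degrad, y1d[j] * degrad, frame='galactic')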
"""
import h5py
import numpy as np
import pandas as pds

import astropy.wcs as awc
import astropy.io.ascii as asc
import astropy.io.fits as fits

import subprocess as subpro
from extinction_redden import A_wave
from astropy.coordinates import SkyCoord

#. dust map with the recalibration by Schlafly & Finkbeiner (2011)
import sfdmap
E_map = sfdmap.SFDMap('/home/xkchen/module/dust_map/sfddata_maskin')

#. dust map of SFD 1998
# from dustmaps.sfd import SFDQuery
# sfd = SFDQuery()

from mpi4py import MPI
commd = MPI.COMM_WORLD
rank = commd.Get_rank()
cpus = commd.Get_size()

#. pipeline processing modules
from img_resample import resamp_func
from BCG_SB_pro_stack import BCG_SB_pros_func, single_img_SB_func
from fig_out_module import arr_jack_func
from light_measure import light_measure_weit
Example 8
ObsHistID = ObsHistID.astype(int)

print(MJDs)

assert np.all(np.diff(MJDs) >= 0.)
assert np.all(np.diff(ObsHistID) >= 0.)
assert np.all(np.diff(_ObsHistID) >= 0.)

##  Closing the connection to the database file
conn.close()

WFDFIELDIDS = []

##  By default, a scaling of 0.86 is applied to the map values to reflect the recalibration by Schlafly & Finkbeiner (2011)
##  https://github.com/kbarbary/sfdmap.
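##  Passing scaling=1.0 to sfdmap.SFDMap() would return the raw, unscaled SFD98 values instead.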
sfd = sfdmap.SFDMap('/global/homes/m/mjwilson/SFD/')

YEAR = 5
_ObsHistID = _ObsHistID[:np.round(YEAR * 0.1 * len(_ObsHistID)).astype(int)]

for i, x in enumerate(ObsHistID):
    if x in _ObsHistID:
        WFDFIELDIDS.append(FieldIDs[i])

        print(100. * i / len(ObsHistID))

for i, id in enumerate(_FieldID):
    if id in WFDFIELDIDS:
        ##  RA, DEC [degrees].
        ebv = sfd.ebv(_FieldRa[i], _FieldDec[i])
Example 9
    def do(self):
        """
        Predicts photometric redshifts from RA and DEC points in SDSS

        An outline of the algorithm is:

        first pull u,g,r,i,z magnitudes from SDSS;
            should be able to handle a list/array of RA and DEC

        place u,g,r,i,z into a vector, append the derived information into the data array

        predict the information from the model

        return the predictions in the same order to the user

        inputs:
            Ra: list or array of len N, right ascensions of target galaxies in decimal degrees
            Dec: list or array of len N, declination of target galaxies in decimal degrees
            search: float, arcmin tolerance to search for the object in SDSS Catalogue
            path_to_model: str, filepath to saved model for prediction
        
        Returns:
            predictions: array of len N, photometric redshift of input galaxy

        """
        
        try:
            nowdate = datetime.datetime.utcnow() - datetime.timedelta(1)
            from django.db.models import Q  # has to remain here, for reasons unknown
            print('Entered the photo_z Pan-Starrs CNN cron')        
            #save time b/c the other cron jobs print a time for completion
                
            m = sfdmap.SFDMap(mapdir=djangoSettings.STATIC_ROOT+'/sfddata-master')
            model_filepath = djangoSettings.STATIC_ROOT+'PS_7_01_ultimate.hdf5' #fill this in with one
                
            NB_BINS = 270
            BATCH_SIZE = 64
            ZMIN = 0.0
            ZMAX = 0.6
            BIN_SIZE = (ZMAX - ZMIN) / NB_BINS
            range_z = np.linspace(ZMIN, ZMAX, NB_BINS + 1)[:NB_BINS]
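            # range_z holds the left edges of the NB_BINS redshift bins; the
            # posterior-weighted sum over it below yields the point estimates.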
            
            transients = Transient.objects.filter(Q(host__isnull=False))
            
            my_index = np.array(range(0,len(transients))) #dummy index used in DF, then used to create a mapping from matched galaxies back to these hosts
               
            transient_dictionary = dict(zip(my_index,transients))
                        
            # Another script places the cutout images into the data model; we just pull them out here.
            # Drop transients that are fake, not yet classified, or missing any PS cutout.
            # (It might be simpler to bypass the data model and read images straight from the cutout folder.)
            for i, T in enumerate(transients):
                ID = T.name
                has_all_cutouts = all(
                    os.path.isfile(djangoSettings.STATIC_ROOT + '/cutouts/cutout_{}_{}.npy'.format(ID, F))
                    for F in ('g', 'r', 'i', 'z', 'y'))
                if not has_all_cutouts:
                    # Image not in the data model yet: drop this transient and
                    # wait for the other cron job to do its job.
                    transient_dictionary.pop(i)
 
            #print('attempting to create model')
            mymodel = create_model_groundup_decay_ultimate(NB_BINS)
            #print('attempting to load model')
            mymodel.load_weights(model_filepath)
            
            Number = 1000  # batch size per outer-loop iteration
            
            # how many full batches of Number fit in the transient dictionary, plus the remainder
            outer_loop_how_many = len(transient_dictionary) // Number
            remainder_how_many = len(transient_dictionary) % Number

            #next take these numbers to make a list of the keys to transient_dictionary, then query parts of that list in turn...
            whats_left = list(transient_dictionary.keys()) #list holding the keys to transient after removing bad entries...

            #create a holding array for predictions
            posterior = np.zeros((len(whats_left),NB_BINS)) #(how many unique hosts,how many bins of discrete redshift)
            for outer_index in range(outer_loop_how_many):
                use_these_indices = whats_left[outer_index*Number:(outer_index+1)*Number]
                #now reset the data array
                DATA = np.zeros((Number, 104, 104, 5))  # safe: with fewer than Number entries, outer_loop_how_many == 0 and this loop never runs
                #Now reset the ra and dec lists so I can grab more extinctions
                RA = []
                DEC = []
                for inner_index,value in enumerate(use_these_indices):
                    T = transient_dictionary[value]
                    ID = T.name
                    for j,F in enumerate(['g','r','i','z','y']):
                        DATA[inner_index,:,:,j] = np.load(djangoSettings.STATIC_ROOT+'/cutouts/cutout_{}_{}.npy'.format(ID,F))
                    RA.append(T.host.ra)
                    DEC.append(T.host.dec)

                #Next run model and place into posterior
                Extinctions = m.ebv(np.array(RA),np.array(DEC))
                posterior[outer_index*Number:(outer_index+1)*Number] = mymodel.predict([DATA,Extinctions])

            #next use the remainder

            use_these_indices = whats_left[((outer_loop_how_many)*Number):((outer_loop_how_many)*Number)+remainder_how_many]
            #Reset Data, only create array large enough to hold whats left
            DATA = np.zeros((len(use_these_indices),104,104,5))
            #reset ra,dec
            RA = []
            DEC = []
            for inner_index,value in enumerate(use_these_indices):
                T = transient_dictionary[value]
                ID = T.name
                for j,F in enumerate(['g','r','i','z','y']):
                    DATA[inner_index,:,:,j] = np.load(djangoSettings.STATIC_ROOT+'/cutouts/cutout_{}_{}.npy'.format(ID,F))
                RA.append(T.host.ra)
                DEC.append(T.host.dec)

            Extinctions = m.ebv(np.array(RA),np.array(DEC))
            posterior[((outer_loop_how_many)*Number):((outer_loop_how_many)*Number)+remainder_how_many] = mymodel.predict([DATA,Extinctions])
            #Now posterior should be full!

            #print('done')
            point_estimates = np.sum(range_z*posterior,1)
                
            error = np.ones(len(point_estimates))
            for i in range(len(point_estimates)):
                error[i] = np.std(np.random.choice(a=range_z, size=1000, p=posterior[i, :] / np.sum(posterior[i, :]), replace=True))  # this could surely be parallelized
                
            for i,value in enumerate(list(transient_dictionary.keys())):
                T = transient_dictionary[value]
                if not (T.host.photo_z_PSCNN):
                    T.host.photo_z_PSCNN = point_estimates[i]
                    T.host.photo_z_err_PSCNN = error[i]
                    #T.host.photo_z_posterior_PSCNN = posterior[i]  # Gautham suggested adding it to the host model
                    #T.host.photo_z_source = 'YSE internal PS CNN'  # not necessary; it's in the name
                    T.host.save()  # slow, and the query then has to be re-run, which is also slow
    
        except Exception as e:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print("""PS Photo-z cron failed with error %s at line number %s"""%(e,exc_tb.tb_lineno))
Example 10
from scipy.interpolate import interp1d
import numpy
from sklearn.preprocessing import Imputer  # removed in scikit-learn 0.22; modern code would use sklearn.impute.SimpleImputer
from scipy import signal

import sfdmap
global m
m = sfdmap.SFDMap('sfddata-master/')

#wget https://github.com/kbarbary/sfddata/archive/master.tar.gz
#tar xzf master.tar.gz


def deredden_spectrum(wl, spec, E_bv):
    """
    function dereddens a spectrum based on the given extinction_g value and Fitzpatric99 model
    IMPORTANT: the spectrum should be in the observer frame (do not correct for redshift)
    """
    # dust model
    wls = numpy.array([2600, 2700, 4110, 4670, 5470, 6000, 12200, 26500])
    a_l = numpy.array([6.591, 6.265, 4.315, 3.806, 3.055, 2.688, 0.829, 0.265])
    f_interp = interp1d(wls, a_l, kind="cubic")

    a_l_all = f_interp(wl)
    #E_bv = extinction_g / 3.793
    A_lambda = E_bv * a_l_all
    spec_real = spec * 10**(A_lambda / 2.5)

    return spec_real
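
A short usage sketch tying the module together (the wavelength grid, flux values, and coordinates are illustrative placeholders):

wl = numpy.linspace(4000, 9000, 3000)  # observer-frame wavelengths [Angstrom]
spec = numpy.ones_like(wl)             # placeholder flux
ebv = m.ebv(150.0, 2.2)                # E(B-V) at RA=150.0 deg, Dec=2.2 deg
spec_dereddened = deredden_spectrum(wl, spec, ebv)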

Example 11
def preprocess(DF, PATH='../DATA/sfddata-master/', ebv=True):
    # Attach an E(B-V) column, convert all fluxes to luptitudes, and
    # standardize the resulting feature matrix.
    if ebv:
        m = sfdmap.SFDMap(PATH)
        EBV = m.ebv(DF['raMean'].values.astype(np.float32),
                    DF['decMean'].values.astype(np.float32))

        DF['ebv'] = EBV
    else:
        DF['ebv'] = 0.0

    def convert_flux_to_luptitude(f, b, f_0=3631):
        # asinh ("luptitude") magnitude: f_0 = 3631 Jy is the AB zero-point
        # flux and b is the per-band softening parameter.
        return -2.5 / np.log(10) * (np.arcsinh(
            (f / f_0) / (2 * b)) + np.log(b))

    # per-band softening parameters b for the luptitude conversion
    b_g = 1.7058474723241624e-09
    b_r = 4.65521985283191e-09
    b_i = 1.2132217745483221e-08
    b_z = 2.013446972858555e-08
    b_y = 5.0575501316874416e-08

    # per-feature training-set means (30 luptitudes + E(B-V)) used for standardization
    MEANS = np.array([
        18.70654578, 17.77948707, 17.34226094, 17.1227873, 16.92087669,
        19.73947441, 18.89279411, 18.4077393, 18.1311733, 17.64741402,
        19.01595669, 18.16447837, 17.73199409, 17.50486095, 17.20389615,
        19.07834251, 18.16996592, 17.71492073, 17.44861273, 17.15508793,
        18.79100201, 17.89569908, 17.45774026, 17.20338482, 16.93640741,
        18.62759241, 17.7453392, 17.31341498, 17.06194499, 16.79030564,
        0.02543223
    ])

    # matching per-feature training-set standard deviations
    STDS = np.array([
        1.7657395, 1.24853534, 1.08151972, 1.03490545, 0.87252421, 1.32486758,
        0.9222839, 0.73701807, 0.65002723, 0.41779001, 1.51554956, 1.05734494,
        0.89939638, 0.82754093, 0.63381611, 1.48411417, 1.05425943, 0.89979008,
        0.83934385, 0.64990996, 1.54735158, 1.10985163, 0.96460099, 0.90685922,
        0.74507053, 1.57813401, 1.14290345, 1.00162105, 0.94634726, 0.80124359,
        0.01687839
    ])

    data_columns = [
        'gFKronFlux', 'rFKronFlux', 'iFKronFlux', 'zFKronFlux', 'yFKronFlux',
        'gFPSFFlux', 'rFPSFFlux', 'iFPSFFlux', 'zFPSFFlux', 'yFPSFFlux',
        'gFApFlux', 'rFApFlux', 'iFApFlux', 'zFApFlux', 'yFApFlux',
        'gFmeanflxR5', 'rFmeanflxR5', 'iFmeanflxR5', 'zFmeanflxR5',
        'yFmeanflxR5', 'gFmeanflxR6', 'rFmeanflxR6', 'iFmeanflxR6',
        'zFmeanflxR6', 'yFmeanflxR6', 'gFmeanflxR7', 'rFmeanflxR7',
        'iFmeanflxR7', 'zFmeanflxR7', 'yFmeanflxR7', 'ebv'
    ]

    X = DF[data_columns].values.astype(np.float32)
    X[:, 0:30:5] = convert_flux_to_luptitude(X[:, 0:30:5], b=b_g)
    X[:, 1:30:5] = convert_flux_to_luptitude(X[:, 1:30:5], b=b_r)
    X[:, 2:30:5] = convert_flux_to_luptitude(X[:, 2:30:5], b=b_i)
    X[:, 3:30:5] = convert_flux_to_luptitude(X[:, 3:30:5], b=b_z)
    X[:, 4:30:5] = convert_flux_to_luptitude(X[:, 4:30:5], b=b_y)

    X = (X - MEANS) / STDS
    # clip extreme standardized values and map NaNs to the lower bound
    X[X > 20] = 20
    X[X < -20] = -20
    X[np.isnan(X)] = -20

    return X
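
A minimal usage sketch, assuming a pandas DataFrame that already carries the raMean/decMean and flux columns listed above (the catalog file name is a hypothetical placeholder):

import pandas as pd

DF = pd.read_csv('ps1_catalog.csv')  # hypothetical input catalog
X = preprocess(DF, PATH='../DATA/sfddata-master/', ebv=True)
print(X.shape)  # (N, 31): 30 luptitudes plus the E(B-V) column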