def avgmat_Ngrid(base_output, mats, lcs, output_dir="COSMO"):
    import numpy as np
    import matplotlib.pyplot as plt
    import array
    import math
    import os
    from astropy import cosmology as cosmo
    from astropy.cosmology import FlatLambdaCDM
    import re

    cosmo2 = FlatLambdaCDM(H0=70, Om0=0.3)

    lists, zs, mbs, mbes, xs, mu_syns, mus = [], [], [], [], [], [], []

    for mat, lc in zip(mats, lcs):
        list1, z1, mb1, mb1e = np.loadtxt(output_dir + "/lcparam_" + lc +
                                          ".txt",
                                          usecols=(0, 1, 4, 5),
                                          unpack=True,
                                          dtype=str)
        z1 = z1.astype(float)
        mb1 = mb1.astype(float)
        mb1e = mb1e.astype(float)
        lists.append(list1)
        zs.append(z1)
        mbs.append(mb1)
        mbes.append(mb1e)

        # use the first file's redshifts so the grids line up across inputs
        x = cosmo2.luminosity_distance(zs[0]).value
        # model distance modulus offset by the assumed M_B = -19.35,
        # so mu1 below is a Hubble residual
        mu_syn1 = 5.0 * (np.log10(x)) + 25.0 - 19.35
        mu1 = mb1 - mu_syn1

        xs.append(x)
        mu_syns.append(mu_syn1)
        mus.append(mu1)
    mua = np.mean(mus, axis=0)
    muae = np.mean(mbes, axis=0)
    mua = np.array(mu_syns[0]) + mua

    f1 = open(output_dir + "/lcparam_" + base_output + ".txt",
              "w")  # this is the file for cosmomc
    f1.write(
        "#name zcmb zhel dz mb dmb x1 dx1 color dcolor 3rdvar d3rdvar cov_m_s cov_m_c cov_s_c set ra dec biascor \n"
    )  # standard format
    for x in range(0, len(zs[0])):
        f1.write(
            str(lists[0][x]) + " " + str(zs[0][x]) + " " + str(zs[0][x]) +
            " 0.0 " + str(mua[x]) + " " + str(muae[x]) +
            " 0 0 0 0 0 0 0 0 0 0 0 0\n")
    f1.close()

    syss = []
    for mat, lc in zip(mats, lcs):
        sys1 = np.loadtxt(output_dir + "/sys_" + mat + ".txt",
                          unpack=True,
                          dtype=str)
        sys1 = sys1.astype(float)
        syss.append(sys1)
    savg = np.mean(syss, axis=0)
    scount = syss[0][0]  # first line of a sys file is the matrix size, identical across inputs
    sys1 = syss[0]
    sys3 = open(output_dir + "/sys_" + base_output + ".txt", "w")
    sys3.write(str(scount) + "\n")
    for x in range(1, len(sys1)):
        sys3.write(str(savg[x]) + "\n")
    sys3.close()
    dataset(output_dir, base_output, "", "", sys=1)
    dataset(output_dir, base_output, "_nosys", "", sys=0)
    print(output_dir + "/sys_" + base_output + ".txt")
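
# A minimal usage sketch (hypothetical file names): assuming COSMO/ already
# holds lcparam_GRID1.txt, lcparam_GRID2.txt, sys_MAT1.txt and sys_MAT2.txt,
# this averages the two fits and writes lcparam_AVG.txt / sys_AVG.txt for
# cosmomc. Note it also needs the module's dataset() helper used above.
if __name__ == "__main__":
    avgmat_Ngrid("AVG", mats=["MAT1", "MAT2"], lcs=["GRID1", "GRID2"],
                 output_dir="COSMO")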
Example #2
def fetch_great_wall(data_home=None,
                     download_if_missing=True,
                     xlim=(-375, -175),
                     ylim=(-300, 200),
                     cosmo=None):
    """Get the 2D SDSS "Great Wall" distribution, following Cowan et al 2008

    Parameters
    ----------
    data_home : optional, default=None
        Specify another download and cache folder for the datasets. By default
        all astroML data is stored in '~/astroML_data' subfolders.

    download_if_missing : optional, default=True
        If False, raise an IOError if the data is not locally available
        instead of trying to download the data from the source site.

    xlim, ylim : tuples or None
        the limits in Mpc of the data: default values are the same as those
        used for the plots in Cowan 2008.  If set to None, no cuts will
        be performed.

    cosmo : `astropy.cosmology` instance specifying cosmology
        to use when generating the sample.  If not provided,
        a Flat Lambda CDM model with H0=73.2, Om0=0.27, Tcmb0=0 is used.

    Returns
    -------
    data : ndarray, shape = (Ngals, 2)
        grid of projected (x, y) locations of galaxies in Mpc
    """
    # local imports so we don't need dependencies for loading module
    from scipy.interpolate import interp1d

    # We need some cosmological information to compute the r-band
    #  absolute magnitudes.
    if cosmo is None:
        cosmo = FlatLambdaCDM(H0=73.2, Om0=0.27, Tcmb0=0)

    data = fetch_sdss_specgals(data_home, download_if_missing)

    # cut to the part of the sky with the "great wall"
    data = data[(data['dec'] > -7) & (data['dec'] < 7)]
    data = data[(data['ra'] > 80) & (data['ra'] < 280)]

    # do a redshift cut, following Cowan et al 2008
    z = data['z']
    data = data[(z > 0.01) & (z < 0.12)]

    # first sample the distance modulus on a grid
    zgrid = np.linspace(min(data['z']), max(data['z']), 100)
    mugrid = cosmo.distmod(zgrid).value
    f = interp1d(zgrid, mugrid)
    mu = f(data['z'])
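    # (interpolating mu(z) from a 100-point grid avoids a distmod call per galaxy)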

    # do an absolute magnitude cut at Mr = -21
    Mr = data['petroMag_r'] + data['extinction_r'] - mu
    data = data[Mr < -21]

    # compute distances in the equatorial plane
    # first sample comoving distance
    Dcgrid = cosmo.comoving_distance(zgrid).value
    f = interp1d(zgrid, Dcgrid)
    dist = f(data['z'])

    locs = np.vstack([
        dist * np.cos(data['ra'] * np.pi / 180.),
        dist * np.sin(data['ra'] * np.pi / 180.)
    ]).T

    # cut on x and y limits if specified
    if xlim is not None:
        locs = locs[(locs[:, 0] > xlim[0]) & (locs[:, 0] < xlim[1])]
    if ylim is not None:
        locs = locs[(locs[:, 1] > ylim[0]) & (locs[:, 1] < ylim[1])]

    return locs
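
# A minimal usage sketch: requires the SDSS spectroscopic galaxy sample via
# fetch_sdss_specgals (downloaded on first call). Plots the projected (x, y)
# galaxy positions in Mpc.
import matplotlib.pyplot as plt

wall = fetch_great_wall()
plt.scatter(wall[:, 0], wall[:, 1], s=1, lw=0)
plt.xlabel('x (Mpc)')
plt.ylabel('y (Mpc)')
plt.show()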
import os
import time
import healpy as hp
from scipy.stats import norm
from scipy.interpolate import interp1d
import astropy.io.fits as fits
import astropy.units as u
from astropy.cosmology import FlatLambdaCDM
from astropy.table import Table
import numpy as n
print('Degrades a mock catalogue to input cosmopipes')
print('------------------------------------------------')
t0 = time.time()

# simulation name
env = 'MD10'  # sys.argv[1]

# cosmology set up
if env == "MD10" or env == "MD04":
    cosmoMD = FlatLambdaCDM(H0=67.77 * u.km / u.s / u.Mpc,
                            Om0=0.307115)  # , Ob0=0.048206)
    h = 0.6777
    L_box = 1000.0 / h
    cosmo = cosmoMD
if env == "UNIT_fA1_DIR" or env == "UNIT_fA1i_DIR" or env == "UNIT_fA2_DIR":
    cosmoUNIT = FlatLambdaCDM(H0=67.74 * u.km / u.s / u.Mpc, Om0=0.308900)
    h = 0.6774
    L_box = 1000.0 / h
    cosmo = cosmoUNIT

# where the catalogue is :
test_dir = os.environ[env]
# catalog
path_2_RS_catalog = os.path.join(test_dir, 'cat_eRO_CLU_RS', '000356.fit')

tt = Table.read(path_2_RS_catalog)
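
# A quick check of the h-unit convention used above: astropy distances come
# out in Mpc, so multiplying by h gives Mpc/h, matching the L_box definition.
d_c = cosmo.comoving_distance(0.3).value * h  # comoving distance to z=0.3 in Mpc/h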
Example #4
# imports assumed by this snippet (not shown in the original excerpt);
# rebin() is a project-local image block-averaging helper and sqlcl a
# project-local SDSS SQL client, both defined elsewhere in the project
import os
import warnings
import numpy as np
import astropy.io.fits as fits
from astropy.convolution import Gaussian2DKernel, convolve
from astropy.utils.exceptions import AstropyWarning
from astropy.cosmology import FlatLambdaCDM

def ObsRealism(inputName,outputName,band='r',
                cosmo=FlatLambdaCDM(H0=70,Om0=0.3),
                common_args = { 
                                'redshift'      : 0.1,   # mock observation redshift
                                'rebin_to_CCD'  : False, # rebin to CCD angular scale
                                'CCD_scale'     : 0.396, # CCD angular scale in [arcsec/pixel]
                                'add_false_sky' : False, # add gaussian sky
                                'false_sky_sig' : 24.2,  # gaussian sky standard dev [AB mag/arcsec2]
                                'add_false_psf' : False, # convolve with gaussian psf
                                'false_psf_fwhm': 1.0,   # gaussian psf FWHM [arcsec]
                                'add_poisson'   : False, # add poisson noise to galaxy
                                'add_sdss_sky'  : False, # insert into real SDSS sky (using sdss_args)
                                'add_sdss_psf'  : False, # convolve with real SDSS psf (using sdss_args)

                              },
               sdss_args    = {
                                'sdss_run'      : 745,       # sdss run
                                'sdss_rerun'    : 40,        # sdss rerun
                                'sdss_camcol'   : 1,         # sdss camcol
                                'sdss_field'    : 517,       # sdss field
                                'sdss_ra'       : 236.1900,  # ra for image centroid
                                'sdss_dec'      : -0.9200,   # dec for image centroid
                              }
               ):
    
    '''
    Add realism to idealized unscaled image.
    
    "redshift": The redshift at which the synthetic image is to be mock-observed. Given that the image should be in surface brightness units and appropriately dimmed by (1+z)^-5, the redshift is only used to determine the angular-to-physical scale of the image -- to which it is appropriately rebinned corresponding to the desired CCD pixel scale.
    
    "rebin_to_CCD": If TRUE, the image is rebinned to the CCD scale identified by the "CCD_scale" keyword. The rebinning is determined by first computing the physical-to-angular scale associated with the target redshift [kpc/arcsec]. Combining this number with the scale of the original image in physical units [kpc/pixel], we obtain the rebinning factor that is neccesary to bring the image to the desired CCD pixel scale [arcsec/pixel].
    
    "CCD_scale": The CCD scale to which the images are rebinned if rebin_to_CCD is TRUE.
    
    "add_false_sky": If TRUE, a Gaussian sky is added to the image with a noise level that is idenfitied by the "false_sky_sig" keyword.
    
    "false_sky_sig": The standard deviation of Gaussian sky that is added to the image if "add_false_sky" is TRUE. The value must be expressed in relative magnitude units (AB mag/arcsec2).
    
    "add_false_psf": If TRUE, a Gaussian PSF is added to the image with a FWHM that is idenfitied by the "false_psf_fwhm" keyword.
    
    "false_psf_fwhm": The FWHM of the PSF that is convolved with the image if "add_false_psf" is TRUE. The value must be expressed in arcsec.
    
    "add_poisson": If TRUE, add Poisson noise to the image using either the calibration info and gain from the real image properties ("add_sdss_sky"=TRUE) or generic values derived from averages over SDSS fields.
    
    "add_sdss_sky": If True, insert into real SDSS sky using arguments in "sdss_args".
    
    "add_sdss_psf": If True and "add_sdss_sky"=True, reconstruct the PSF at the injection location and convolve with the image.
    '''
    
    # mock observation redshift
    redshift = common_args['redshift']
    # speed of light [m/s]
    speed_of_light = 2.99792458e8
    # kiloparsec per arcsecond scale
    kpc_per_arcsec = cosmo.kpc_proper_per_arcmin(z=redshift).value/60. # [kpc/arcsec]
    # luminosity distance in Mpc
    luminosity_distance = cosmo.luminosity_distance(z=redshift) # [Mpc]
    
    # img header and data
    with fits.open(inputName,mode='readonly') as hdul:
        # img header
        header = hdul[0].header
        # img data
        img_data = hdul[0].data
    
#    # header properties
#    sim_tag = header['SIMTAG']
#    sub_tag = header['SUBTAG']
#    isnap = header['ISNAP']
#    axis = header['CAMERA']
#    band = header['FILTER'][0]
#
#    # unique simulID
#    simulID = '{}-{}-{}-{}'.format(sim_tag,sub_tag,isnap,axis)
#
#    band = header['FILTER'][0]

    # collect physical pixel scale
    kpc_per_pixel = header['CDELT1']/1000. # [kpc/pixel]
    # compute angular pixel scale from cosmology
    arcsec_per_pixel = kpc_per_pixel / kpc_per_arcsec # [arcsec/pixel]
     
    # img in AB nanomaggies per arcsec2
    img_nanomaggies = 10**(-0.4*(img_data-22.5)) # [nmgys/arcsec2]
    # apply pixel scale [arcsec/pixel]2 to convert to calibrated flux
    img_nanomaggies *= arcsec_per_pixel**2 # [nmgs]
    # update units of image header to linear calibrated scale
    header['BUNIT'] = 'AB nanomaggies'
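    # (22.5 is the nanomaggy zeropoint: m_AB = 22.5 - 2.5*log10(f/nanomaggy),
    #  so a surface brightness of 22.5 mag/arcsec2 maps to 1 nmgy/arcsec2)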
    
#    print('\nRaw image:')
#    print('kpc_per_arcsec: {}'.format(kpc_per_arcsec))
#    print('kpc_per_pixel: {}'.format(kpc_per_pixel))
#    print('arcsec_per_pixel: {}'.format(arcsec_per_pixel))
#    m_AB = -2.5*np.log10(np.sum(img_nanomaggies))+22.5
#    print('AB_magnitude: {} at z={}'.format(m_AB,redshift))
#    M_AB = m_AB-5*np.log10(luminosity_distance.value)-25
#    print('AB_Magnitude: {}'.format(M_AB))

    # Add levels of realism
    
    if common_args['rebin_to_CCD']:
        '''
        Rebin image to a given angular CCD scale
        '''
        # telescope ccd angular scale
        ccd_scale = common_args['CCD_scale']
        # axes of original image
        nPixelsOld = img_nanomaggies.shape[0]
        # axes of regridded image
        nPixelsNew = int(np.floor((arcsec_per_pixel/ccd_scale)*nPixelsOld))
        # rebin to new ccd scale
        img_nanomaggies = rebin(img_nanomaggies,(nPixelsNew,nPixelsNew))
        # new kpc_per_pixel on ccd
        kpc_per_pixel = kpc_per_arcsec * ccd_scale
        # new arcsec per pixel
        arcsec_per_pixel = ccd_scale
        # header updates
        # integer division preserves the original (Python 2) behavior
        if nPixelsNew%2: CRPIX = float(nPixelsNew//2)
        else: CRPIX = float(nPixelsNew//2)+0.5
        header['CRPIX1'] = CRPIX
        header['CRPIX2'] = CRPIX
        header['CDELT1'] = kpc_per_pixel*1000
        header['CDELT2'] = kpc_per_pixel*1000
#        print('\nAfter CCD scaling:')
#        print('kpc_per_arcsec: {}'.format(kpc_per_arcsec))
#        print('kpc_per_pixel: {}'.format(kpc_per_pixel))
#        print('arcsec_per_pixel: {}'.format(arcsec_per_pixel))
#        m_AB = -2.5*np.log10(np.sum(img_nanomaggies))+22.5
#        print('AB_magnitude: {} at z={}'.format(m_AB,redshift))
#        M_AB = m_AB-5*np.log10(luminosity_distance.value)-25
#        print('AB_Magnitude: {}'.format(M_AB))

    # convolve with gaussian psf
    if common_args['add_false_psf']:
        '''
        Add Gaussian PSF to image with provided FWHM in
        arcseconds.
        '''
        # FWHM -> Gaussian sigma (2*sqrt(2*ln 2) ~ 2.355), in pixels
        std = common_args['false_psf_fwhm']/arcsec_per_pixel/2.355
        kernel = Gaussian2DKernel(x_stddev=std)  # modern astropy keyword
        img_nanomaggies = convolve(img_nanomaggies, kernel)
        
    # add poisson noise to image
    if common_args['add_poisson'] and not common_args['add_sdss_sky']:
        '''
        Add shot noise to image assuming the average SDSS
        field properties for zeropoint, airmass, atmospheric
        extinction, and gain. The noise calculation assumes
        that the number of counts in the converted image is 
        the mean number of counts in the Poisson distribution.
        Thereby, the standard error in that number of counts 
        is the square root of the number of counts in each 
        pixel.
        
        For details on the methods applied here, see:
        http://classic.sdss.org/dr7/algorithms/fluxcal.html
        
        Average quantities obtained from SkyServer SQL form.
        http://skyserver.sdss.org/dr7/en/tools/search/sql.asp
        DR7 Query Form:
        SELECT AVG(airmass_x),AVG(aa_x),AVG(kk_x),AVG(gain_x)
        FROM Field
        '''
        # average sdss photometric field properties (gain is inverse gain)
        airmass  = {'u':1.178, 'g':1.178, 'r':1.177, 'i':1.177, 'z':1.178}
        aa       = {'u':-23.80,'g':-24.44,'r':-24.03,'i':-23.67,'z':-21.98}
        kk       = {'u':0.5082,'g':0.1898,'r':0.1032,'i':0.0612,'z':0.0587}
        gain     = {'u':1.680, 'g':3.850, 'r':4.735, 'i':5.111, 'z':4.622}
        exptime  = 53.907456 # seconds
        # conversion factor from nanomaggies to counts
        counts_per_nanomaggy = exptime*10**(-0.4*(22.5+aa[band]+kk[band]*airmass[band]))
        # image in counts for given field properties
        img_counts = np.clip(img_nanomaggies * counts_per_nanomaggy,a_min=0,a_max=None)
        # poisson noise [adu] computed accounting for gain [e/adu]
        img_counts = np.random.poisson(lam=img_counts*gain[band])/gain[band]
        # convert back to nanomaggies
        img_nanomaggies = img_counts / counts_per_nanomaggy
        
    # add gaussian sky to image
    if common_args['add_false_sky']:
        '''
        Add sky with noise level set by "false_sky_sig" 
        keyword. "false_sky_sig" should be in relative  
        AB magnitudes/arcsec2 units. In other words,
        10**(-0.4*false_sky_sig) gives the sample 
        standard deviation in the sky in linear flux units
        [maggies/arcsec2] around a sky level of zero.
        '''
        # sky sig in AB mag/arcsec2
        false_sky_sig = common_args['false_sky_sig']
        # conversion from mag/arcsec2 to nanomaggies/arcsec2
        false_sky_sig = 10**(0.4*(22.5-false_sky_sig))
        # account for pixel scale in final image
        false_sky_sig *= arcsec_per_pixel**2
        # create false sky image
        sky = false_sky_sig*np.random.randn(*img_nanomaggies.shape)
        # add false sky to image in nanomaggies
        img_nanomaggies += sky
    
    # add image to real sdss sky
    if common_args['add_sdss_sky']:
        '''
        Insert the image into a real SDSS field drawn from the
        survey database, effectively weighted by the number of
        galaxies in each field. For this to work, the desired field
        mask should already have been generated and the
        insertion location selected.
        '''
        import sqlcl
        from astropy.wcs import WCS
        run    = sdss_args['sdss_run']
        rerun  = sdss_args['sdss_rerun']
        camcol = sdss_args['sdss_camcol']
        field  = sdss_args['sdss_field']
        ra     = sdss_args['sdss_ra']
        dec    = sdss_args['sdss_dec']
        exptime = 53.907456 # seconds
        
        # sdss data archive server
        das_url = 'http://das.sdss.org/'
    
        # get and unzip corrected image
        corr_url = das_url+'imaging/{}/{}/corr/{}/'.format(run,rerun,camcol)
        corr_image_name = 'fpC-{:06}-{}{}-{:04}.fit'.format(run,band,camcol,field)
        if not os.access(corr_image_name,0):
            corr_url+='{}.gz'.format(corr_image_name)
            os.system('wget {}'.format(corr_url))
            os.system('gunzip {}'.format(corr_image_name))
        # get wcs mapping
        w = WCS(corr_image_name)
        # determine column and row position in image
        colc,rowc = w.all_world2pix(ra,dec,1,ra_dec_order=True)
        # convert to integers
        colc,rowc = int(np.around(colc)),int(np.around(rowc))
        
        # get field properties from skyServer
        dbcmd = ['SELECT aa_{b},kk_{b},airmass_{b},gain_{b},sky_{b},skysig_{b}'.format(b=band),
                 'FROM Field where run={} AND rerun={}'.format(run,rerun),
                 'AND camcol={} AND field={}'.format(camcol,field)]
        lines = sqlcl.query(' '.join(dbcmd)).readlines()
        # zeropoint, atmospheric extinction, airmass, inverse gain, sky, sky uncertainty
        aa,kk,airmass,gain,sky,skysig = [float(var) for var in lines[1].decode("utf-8").split('\n')[0].split(',')]
        #print(aa,kk,airmass,gain,sky,skysig)
        # convert sky to nanomaggies from maggies/arcsec2
        sky *= (1e9*0.396127**2)
        # convert skysig to nanomaggies from relative sky magnitude errors
        skysig *= sky*np.log(10)/2.5
        # software bias added to corrected images to avoid negative values
        softbias = float(fits.getheader(corr_image_name)['SOFTBIAS'])
        # subtract softbias from corrected image to get image in DN
        corr_image_data = fits.getdata(corr_image_name).astype(float) - softbias # [counts]
        # conversion from nanomaggies to counts
        counts_per_nanomaggy = exptime*10**(-0.4*(22.5+aa+kk*airmass))
        # convert image in counts to nanomaggies with Field properties
        corr_image_data /= counts_per_nanomaggy # [nanomaggies]
        
        if common_args['add_sdss_psf'] and not common_args['add_false_psf']:
            '''
            Grab, reconstruct, and convolve real SDSS PSF image
            with the image in nanomaggies.
            '''
            # get corresponding psf reconstruction image
            psf_url = das_url+'imaging/{}/{}/objcs/{}/'.format(run,rerun,camcol)
            psf_image_name = 'psField-{:06}-{}-{:04}.fit'.format(run,camcol,field)
            if os.access(psf_image_name,0):os.remove(psf_image_name)
            psf_url+=psf_image_name
            os.system('wget {}'.format(psf_url))
            psf_ext = {'u':1,'g':2,'r':3,'i':4,'z':5}
            psfname = 'sdss_psf.fit'
            os.system('Sources/utils/sdss-apps/readAtlasImages-v5_4_11/read_PSF {} {} {} {} {}'.format(psf_image_name,psf_ext[band],rowc,colc,psfname))
            if os.access(psf_image_name,0): os.remove(psf_image_name)
            # remove softbias from PSF 
            psfdata = fits.getdata(psfname).astype(float)-1000.
            # normalize for convolution with image in nanomaggies
            psfdata /= np.sum(psfdata)
            # convolve with image in nanomaggies
            img_nanomaggies = convolve(img_nanomaggies,psfdata)
            if os.access(psfname,0):os.remove(psfname)
        
        if common_args['add_poisson']:
            '''
            Add Poisson noise to the PSF-convolved image
            with noise level corresponding to the real SDSS
            field properties.
            '''
            # image in counts for given field properties
            img_counts = np.clip(img_nanomaggies * counts_per_nanomaggy,a_min=0,a_max=None)
            # poisson noise [adu] computed accounting for gain [e/adu]
            img_counts = np.random.poisson(lam=img_counts*gain)/gain
            # convert back to nanomaggies
            img_nanomaggies = img_counts / counts_per_nanomaggy
            
        # add real sky pixel by pixel to image in nanomaggies
        corr_ny,corr_nx = corr_image_data.shape
        ny,nx = img_nanomaggies.shape
        for xx in range(nx):
            for yy in range(ny):
                corr_x = int(colc - nx/2 + xx)
                corr_y = int(rowc - ny/2 + yy)
                if corr_x>=0 and corr_x<=corr_nx-1 and corr_y>=0 and corr_y<=corr_ny-1:
                    img_nanomaggies[yy,xx]+=corr_image_data[corr_y,corr_x]
        if os.access(corr_image_name,0):os.remove(corr_image_name)
        
        # add field info to image header
        warnings.simplefilter('ignore', category=AstropyWarning)            
        header.append(('RUN',run,'SDSS image RUN'),end=True)
        header.append(('RERUN',rerun,'SDSS image RERUN'),end=True)
        header.append(('CAMCOL',camcol,'SDSS image CAMCOL'),end=True)
        header.append(('FIELD',field,'SDSS image FIELD'),end=True)
        header.append(('RA',float(ra),'Cutout centroid RA'),end=True)
        header.append(('DEC',float(dec),'Cutout centroid DEC'),end=True)
        header.append(('COLC',colc,'SDSS image column center'),end=True)
        header.append(('ROWC',rowc,'SDSS image row center'),end=True)
        header.append(('GAIN',gain,'SDSS CCD GAIN'),end=True)
        header.append(('ZERO',aa,'SDSS image zeropoint'),end=True)
        header.append(('EXTC',kk,'SDSS image atm. extinction coefficient'),end=True)
        header.append(('AIRM',airmass,'SDSS image airmass'),end=True)
        header.append(('SKY',sky,'Average sky in full SDSS field [nanomaggies]'),end=True)
        header.append(('SKYSIG',skysig,'Average sky uncertainty per pixel [nanomaggies]'),end=True)
            
    gimage = outputName
    if os.access(gimage,0): os.remove(gimage)
        
#    print('\nAfter Realism:')
#    print('kpc_per_arcsec: {}'.format(kpc_per_arcsec))
#    print('kpc_per_pixel: {}'.format(kpc_per_pixel))
#    print('arcsec_per_pixel: {}'.format(arcsec_per_pixel))
#    m_AB = -2.5*np.log10(np.sum(img_nanomaggies))+22.5
#    print('AB_magnitude: {} at z={}'.format(m_AB,redshift))
#    M_AB = m_AB-5*np.log10(luminosity_distance.value)-25
#    print('AB_Magnitude: {}'.format(M_AB))

    hdu_pri = fits.PrimaryHDU(img_nanomaggies)

    header['REDSHIFT'] = (redshift,'Redshift')
    header.append(('COSMO','FLAT_LCDM','Cosmology'),end=True)
    header.append(('OMEGA_M',cosmo.Om(0),'Matter density'),end=True)
    header.append(('OMEGA_L',cosmo.Ode(0),'Dark energy density'),end=True)
    header.append(('SCALE_1',arcsec_per_pixel,'[arcsec/pixel]'),end=True)
    header.append(('SCALE_2',kpc_per_pixel,'[kpc/pixel]'),end=True)
    header.append(('SCALE_3',kpc_per_arcsec,'[kpc/arcsec]'),end=True)
    header.append(('LUMDIST',cosmo.luminosity_distance(z=redshift).value,'Luminosity Distance [Mpc]'),end=True)
    warnings.simplefilter('ignore', category=AstropyWarning)
    header.extend(zip(common_args.keys(),common_args.values()),unique=True)
    hdu_pri.header = header
    hdu_pri.writeto(gimage)
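
# A minimal usage sketch (hypothetical file names): mock-observe an idealized
# image with a Gaussian PSF, Gaussian sky, and Poisson noise, leaving the
# SDSS-insertion options off so no external downloads are needed.
ObsRealism('ideal_image.fits', 'mock_image.fits', band='r',
           common_args={'redshift': 0.05,
                        'rebin_to_CCD': True, 'CCD_scale': 0.396,
                        'add_false_sky': True, 'false_sky_sig': 24.2,
                        'add_false_psf': True, 'false_psf_fwhm': 1.0,
                        'add_poisson': True,
                        'add_sdss_sky': False, 'add_sdss_psf': False})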
"""
Tests the observational data loading / writing.
"""

from velociraptor.observations import (
    load_observations,
    ObservationalData,
    MultiRedshiftObservationalData,
)

import unyt
import os

from astropy.cosmology import FlatLambdaCDM

sample_cosmology = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.048, name="test", m_nu=0.0)


def test_single_obs():
    """
    Tests that writing and reading a single observation works.
    """

    test_obs = ObservationalData()

    test_obs.associate_x(
        unyt.unyt_array([1, 2, 3], "Solar_Mass"),
        scatter=None,
        comoving=False,
        description="Galaxy Stellar Mass",
    )
Example #6
    def __init__(self,
                 lcfile,
                 walkerfile,
                 m_neV,
                 Mprog,
                 bfield='jansson12',
                 cosmo=FlatLambdaCDM(H0=0.7 * 100. * u.km / u.s / u.Mpc,
                                     Tcmb0=2.725 * u.K,
                                     Om0=0.3),
                 min_delay=0.,
                 max_delay=0.,
                 t_sec_ref=20.,
                 spline=dict(k=2, s=1e-3, ext='extrapolate')):
        """
        Initialize the class

        Parameters
        ----------
        lcfile: str
            path for combined lc file in fits or npy format

        walkerfile: str
            path to MOSFIT results file which contains the walkers

        m_neV: float
            ALP mass in neV

        Mprog: float
            progenitor mass in solar masses
            (currently only 10. and 18. implemented)

        {options}

        bfield: str
            Milky Way Bfield identifier, default: jansson12

        cosmo: `~astropy.cosmology.FlatLambdaCDM`
            used cosmology

        spline: dict
            dictionary with keywords for spline interpolation 
            of likelihood functions

        min_delay: float
            minimum delay between core collapse and
            onset of optical emission in days, default: 0.

        max_delay: float
            maximum delay between core collapse and
            onset of optical emission in days, default: 0.

        """
        if 'fits' in lcfile:
            self._t = Table.read(lcfile)
        elif 'npy' in lcfile:
            self._t = np.load(lcfile).flat[0]

        self._emin = np.unique(self._t['emin_sed'].data)
        self._emax = np.unique(self._t['emax_sed'].data)
        self._tmin = self._t['tmin'].data
        self._tmax = self._t['tmax'].data
        self._tcen = 0.5 * (self._tmin + self._tmax)
        self._dt = self._tmax - self._tmin

        self._walkerfile = walkerfile
        self._Mprog = Mprog
        self._m_neV = m_neV
        self._bfield = bfield
        self._cosmo = cosmo
        self._snalpflux = SNALPflux(walkerfile, Mprog=Mprog, cosmo=cosmo)
        self._t_sec_ref = t_sec_ref
        self._set_refflux()

        # get the posterior for the explosion time
        self._tpost = TexpPost(walkerfile,
                               min_delay=min_delay,
                               max_delay=max_delay)

        # arrays to store cached g11 and flux for likelihood calculation
        self.__g11cache = None
        self.__fluxcache = None

        # spline interpolations
        self._dlog_spline = []

        # loop over time bins
        for i, dlog in enumerate(self._t['dloglike_scan']):
            # loop energy bins
            self._dlog_spline.append([])
            for j in range(self._emax.size):
                self._dlog_spline[i].append(
                    USpline(self._t['norm_scan'][i, j] * self._t['ref_flux'][i, j],
                            dlog[j] + self._t['loglike'][i, j], **spline))
Example #7
    def _subclass_init(self, **kwargs):
        self.header_file = kwargs['header_file']

        if not os.path.isfile(self.header_file):
            raise ValueError('Header file {} does not exist'.format(self.header_file))

        self.header = self.parse_header(self.header_file)
        self.base_dir = os.path.dirname(self.header_file)

        self.cosmology = FlatLambdaCDM(H0=71, Om0=0.265, Ob0=0.0448)
        self.lightcone = True

        self.legacy_gal_catalog = False
        self._data = dict()
        self._object_files = dict()
        for filename in self.header['includeobj']:
            obj_type = filename.partition('_cat_')[0]

            if obj_type == 'gal':
                self.legacy_gal_catalog = True
            elif obj_type not in self._col_names:
                warnings.warn('Unknown object type {}! Skipped!'.format(obj_type))
                continue

            full_path = os.path.join(self.base_dir, filename)
            if not os.path.isfile(full_path):
                warnings.warn('Cannot find file {}! Skipped!'.format(full_path))
                continue

            self._object_files[obj_type] = full_path

        if self.legacy_gal_catalog:
            if any(t in self._object_files for t in self._legacy_gal_types):
                raise ValueError('cannot determine whether this is a legacy instance catalog!')
            for t in self._legacy_gal_types:
                self._object_files[t] = self._object_files['gal']
            del self._object_files['gal']

        try:
            self.visit = int(self.header.get('obshistid'))
        except (TypeError, ValueError):
            warnings.warn('Cannot parse visit id {}'.format(self.header.get('obshistid')))
            self.visit = None

        shape_quantities = ('gal/a_bulge',
                            'gal/b_bulge',
                            'gal/theta_bulge',
                            'gal/mag_norm_bulge',
                            'gal/a_disk',
                            'gal/b_disk',
                            'gal/theta_disk',
                            'gal/mag_norm_disk')

        self._quantity_modifiers = {
            'galaxy_id': 'gal/total_id',
            'ra_true': (_get_one, 'gal/ra_bulge', 'gal/ra_disk'),
            'dec_true': (_get_one, 'gal/dec_bulge', 'gal/dec_disk'),
            'mag_true_i_lsst': (_get_total_mag, 'gal/mag_norm_bulge', 'gal/mag_norm_disk'),
            'redshift_true': (_get_one, 'gal/redshift_bulge', 'gal/redshift_disk'),
            'bulge_to_total_ratio_i': (_get_bulge_fraction, 'gal/mag_norm_bulge', 'gal/mag_norm_disk'),
            'sersic_disk': 'gal/sersic_n_disk',
            'sersic_bulge': 'gal/sersic_n_bulge',
            'convergence': (_get_one, 'gal/kappa_bulge', 'gal/kappa_disk'),
            'shear_1': (_get_one, 'gal/gamma_1_bulge', 'gal/gamma_1_disk'),
            'shear_2': (_get_one, 'gal/gamma_2_bulge', 'gal/gamma_2_disk'),
            'size_true': (_get_total_a,) + shape_quantities,
            'size_minor_true': (_get_total_b,) + shape_quantities,
            'position_angle_true': (_get_total_beta,) + shape_quantities,
            'ellipticity_1_true': (_get_total_e1,) + shape_quantities,
            'ellipticity_2_true': (_get_total_e2,) + shape_quantities,
            'size_disk_true': 'gal/a_disk',
            'size_disk_minor_true': 'gal/b_disk',
            'size_bulge_true': 'gal/a_bulge',
            'size_bulge_minor_true': 'gal/b_bulge',
        }
Example #8
# imports assumed by this snippet (not shown in the original excerpt)
import os
import numpy as np
from prep_filters import prep_filters
from scipy.interpolate import interp1d
from app_mags import get_appmags
from astropy.table import Table
from desispec.interpolation import resample_flux
from astropy.cosmology import FlatLambdaCDM
from read_ised import read_ised
from numpy.random import choice
from extinction import calzetti00
from extinction import apply as ext_apply
from madau import lephare_madau
from uv_filter import get_detband
from collections import OrderedDict

root = os.environ['BEAST']
cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Tcmb0=2.725)

def BC03_maker(ngal=None, restframe=False, printit=False, test=False, seed=314, redshifts=None, magnitudes=None, alliseds=False,\
               calzetti=False, madau=False, hiwave=1.e4):

    np.random.seed(seed)

    if redshifts is None:
        redshifts = 1.5 + np.linspace(0., 4.0, 20)

    if magnitudes is None:
        magnitudes = 20.0 + np.arange(0., 7.0, 0.1)

    if printit:
        print('\n\nSolving for redshifts:\n' + ''.join('%.3lf, ' % x
                                                       for x in redshifts))
class Stacker():

    #general physical parameters
    Efactor = 1.66e12  #(c^2/4piG) in units of (Mo/pc)
    sigma_e = 0.28  #error in each of the ellipticity measurements
    cosmo = FlatLambdaCDM(H0=70., Om0=0.3)  #cosmology

    def __init__(self, radii, estimator, data_path, source_path):
        self.radii = radii  #radial bins
        self.numBins = (len(radii) - 1)  #number of bins

        if estimator not in ('SCH', 'CJ'):
            raise ValueError(
                'Incorrect estimator specified. Accepted estimators are: \'SCH\' or \'CJ\''
            )
        self.estimator = estimator

        self.source_path = data_path + source_path

        #sigma crit inverse function
        sigma_crit_table = table.Table.read(data_path + 'sigma_crit_inv.csv',
                                            format='ascii.csv')
        self.sigma_crit_inv = spline(sigma_crit_table['zl'],
                                     sigma_crit_table['sigma_crit_inv'])

        #initialize sum lists
        #monopole
        self.ERnums = np.zeros(self.numBins)  #list of sums of E(R) numerators
        self.ERdems = np.zeros(
            self.numBins)  #list of sums of E(R) denominators
        self.ERNerrs = np.zeros(
            self.numBins)  #list of errors on E(R) numerators

        #quadrupole
        if self.estimator == 'SCH':  #Schrabback estimators
            self.ERnumsQ1 = np.zeros(
                self.numBins)  #list of sums of E(R) quadrupole numerators
            self.ERdemsQ1 = np.zeros(
                self.numBins)  #list of sums of E(R) quadrupole denominators
            self.ERNerrsQ1 = np.zeros(
                self.numBins)  #list of errors on E(R) quadrupole numerators
            self.ERnumsQ2 = np.zeros(
                self.numBins
            )  #list of sums of E(R) cross quadrupole numerators
            self.ERdemsQ2 = np.zeros(
                self.numBins
            )  #list of sums of E(R) cross quadrupole denominators
            self.ERNerrsQ2 = np.zeros(
                self.numBins
            )  #list of errors on E(R) cross quadrupole numerators
        elif self.estimator == 'CJ':  #CJ estimators
            #quadrupole numerators
            self.ERnums1P = np.zeros(self.numBins)
            self.ERnums1M = np.zeros(self.numBins)
            self.ERnums2P = np.zeros(self.numBins)
            self.ERnums2M = np.zeros(self.numBins)
            #quadrupole denominators
            self.ERdems1P = np.zeros(self.numBins)
            self.ERdems1M = np.zeros(self.numBins)
            self.ERdems2P = np.zeros(self.numBins)
            self.ERdems2M = np.zeros(self.numBins)
            #quadrupole numerator errors
            self.ERNerrs1P = np.zeros(self.numBins)
            self.ERNerrs1M = np.zeros(self.numBins)
            self.ERNerrs2P = np.zeros(self.numBins)
            self.ERNerrs2M = np.zeros(self.numBins)

        #initialize counters
        self.ns = 0  #initialize number of sources
        self.npr = 0  #initialize number of lens-source pairs

    #calculate angular separation between two points (from SkyCoord)
    def angular_separation(self, lon1, lat1, lon2, lat2):
        sdlon = np.sin(lon2 - lon1)
        cdlon = np.cos(lon2 - lon1)
        slat1 = np.sin(lat1)
        slat2 = np.sin(lat2)
        clat1 = np.cos(lat1)
        clat2 = np.cos(lat2)

        num1 = clat2 * sdlon
        num2 = clat1 * slat2 - slat1 * clat2 * cdlon
        denominator = slat1 * slat2 + clat1 * clat2 * cdlon

        return np.arctan2(np.hypot(num1, num2),
                          denominator)  #return result in rad

    #calculate position angle between two points (from SkyCoord)
    #modified to produce angle CCW from x-axis
    def position_angle_sky(self, lon1, lat1, lon2, lat2):
        deltalon = lon2 - lon1
        colat = np.cos(lat2)

        x = np.sin(lat2) * np.cos(lat1) - colat * np.sin(lat1) * np.cos(
            deltalon)
        y = np.sin(deltalon) * colat

        pos_angle = np.arctan2(y, x) + np.pi / 2
        return (pos_angle + (2 * np.pi)) % (2 * np.pi)  #return result in rad

    #convert RA and DEC to x and y
    def convert_xy(self, RA1, DEC1, RA2, DEC2):
        RA1more = RA1 - RA2 > np.pi  #if spanning 360-0 gap
        RA2more = RA2 - RA1 > np.pi  #if spanning 0-360 gap

        RA2 = RA2 + RA1more * (2 * np.pi)
        RA2 = RA2 - RA2more * (2 * np.pi)

        x = -(RA2 - RA1) * np.cos(DEC1)
        y = DEC2 - DEC1
        return x, y

    #convert x and y to RA and DEC
    def convert_radec(self, RA1, DEC1, sx, sy):
        RAprime = RA1 - (sx / np.cos(DEC1))
        RAprime = (RAprime + (2 * np.pi)) % (2 * np.pi)
        DECprime = sy + DEC1

        return RAprime, DECprime

    #rotate coordinate frame (e1, e2, RA, and DEC)
    def rotate_coords(self, l, s, sx, sy, ths):
        numSource = len(s['RA'])  #number of sources

        #rotate e1, e2 by -2s
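        # (factor of 2: ellipticity is a spin-2 quantity, so it rotates twice
        #  as fast as the coordinate frame)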
        coss = np.cos(-2 * ths) * np.ones(numSource)
        sins = np.sin(-2 * ths) * np.ones(numSource)
        Rs = np.array([coss, -1.0 * sins, sins, coss])
        Rs = Rs.T.reshape((numSource, 2, 2))

        es = [s['e1'], s['e2']]  #create arrays of ellipticities
        es = np.transpose(es)
        eRs = [np.dot(rot, ell) for rot, ell in zip(Rs, es)]  #perform rotation
        eRs = np.transpose(eRs)
        e1prime = eRs[0]
        e2prime = eRs[1]

        #rotate x, y by s
        coss = np.cos(-1 * ths) * np.ones(numSource)
        sins = np.sin(-1 * ths) * np.ones(numSource)
        Rs = np.array([coss, -1.0 * sins, sins, coss])
        Rs = Rs.T.reshape((numSource, 2, 2))

        es = [sx, sy]  #array of x's and y's
        es = np.transpose(es)
        eRs = [np.dot(rot, ell) for rot, ell in zip(Rs, es)]  #perform rotation
        eRs = np.transpose(eRs)
        sxprime = eRs[0]
        syprime = eRs[1]

        return e1prime, e2prime, sxprime, syprime

    #perform shear stack for single lens
    def stack_shear(self, l):
        #read in sources
        sources = table.Table.read(self.source_path, format='ascii.csv')

        #calculate distance to lens
        l_dist = np.array(self.cosmo.angular_diameter_distance(
            l['z'])) * 1000.0  #distance to lens galaxy in kpc

        #convert RA and DEC from degrees to radians
        l['RA'] *= (np.pi / 180)
        l['DEC'] *= (np.pi / 180)
        sources['RA'] *= (np.pi / 180)
        sources['DEC'] *= (np.pi / 180)

        #calculate distances and restrict source sample
        separations = self.angular_separation(l['RA'], l['DEC'], sources['RA'],
                                              sources['DEC'])
        r_dists = separations * l_dist

        r_inds = np.digitize(
            r_dists,
            self.radii) - 1  #determine indices of radial bins for distances
        sources = sources[
            r_inds < self.numBins]  #eliminate furthest bin from source sample
        r_dists = r_dists[r_inds <
                          self.numBins]  #eliminate distances that are too far
        r_inds = r_inds[r_inds <
                        self.numBins]  #eliminate indices that are too far

        self.npr += len(sources)  #increment number of lens-source pairs

        if len(sources) > 0:  #if there are any sources
            #calculate lens position angle
            pos_angle = l['THETA']
            theta_shift = pos_angle * (np.pi / 180)

            #rotate entire frame to align with x-axis

            #convert RA and DEC to x and y
            sx, sy = self.convert_xy(l['RA'], l['DEC'], sources['RA'],
                                     sources['DEC'])

            #rotate e1, e2, x, and y
            e1prime, e2prime, sxprime, syprime = self.rotate_coords(
                l, sources, sx, sy, theta_shift)
            sources['e1'] = e1prime
            sources['e2'] = e2prime

            #calculate position angle of sources (x-axis)
            thetas = np.arctan2(syprime, sxprime)

            e1 = sources['e1']
            e2 = sources['e2']
            eR1s = -e1 * np.cos(2 * thetas) - e2 * np.sin(2 * thetas)
            eR2s = e1 * np.sin(2 * thetas) - e2 * np.cos(2 * thetas)

            #calculate E_crit
            Ecrit = 1. / self.sigma_crit_inv(l['z'])
            Wcrit = Ecrit**(-2)
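            # inverse-variance weighting: each lens-source pair carries Sigma_crit^-2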

            #calculate monopole
            histWeight = eR1s * sources['weight']
            ERnumsH = Ecrit * Wcrit * np.histogram(
                r_dists, bins=self.radii,
                weights=histWeight)[0]  #add to E(R) numerator
            histWeight = sources['weight']
            ERdemsH = Wcrit * np.histogram(
                r_dists, bins=self.radii,
                weights=histWeight)[0]  #add to E(R) denominator
            histWeight = Ecrit * Wcrit * sources['weight'] * self.sigma_e
            histWeight = np.square(histWeight)
            ERNerrsH = np.histogram(r_dists,
                                    bins=self.radii,
                                    weights=histWeight)[0]  #add to E(R) errors

            #sum histogram results to lists
            self.ERnums = self.ERnums + ERnumsH
            self.ERdems = self.ERdems + ERdemsH
            self.ERNerrs = self.ERNerrs + ERNerrsH

            #calculate quadrupole
            eg = abs(
                (l['A'] - l['B']) / (l['A'] + l['B']))  #galaxy ellipticity

            if eg != 0:  #CG quadrupole estimators

                if self.estimator == 'SCH':

                    #Schrabback estimators

                    #add E(R) tangential and cross results to sums
                    #numerators
                    histWeight = eR1s * sources['weight'] * np.cos(2 * thetas)
                    ERnumsH1 = Ecrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii,
                        weights=histWeight)[0]  #add to E(R) numerator
                    histWeight = eR2s * sources['weight'] * np.sin(2 * thetas)
                    ERnumsH2 = Ecrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii,
                        weights=histWeight)[0]  #add to E(R) numerator

                    #denominators
                    histWeight = sources['weight'] * np.cos(
                        2 * thetas) * np.cos(2 * thetas)
                    ERdemsH1 = 2 * Wcrit * np.histogram(
                        r_dists, bins=self.radii,
                        weights=histWeight)[0]  #add to E(R) denominator
                    histWeight = sources['weight'] * np.sin(
                        2 * thetas) * np.sin(2 * thetas)
                    ERdemsH2 = 2 * Wcrit * np.histogram(
                        r_dists, bins=self.radii,
                        weights=histWeight)[0]  #add to E(R) denominator

                    #error numerators
                    histWeight = Ecrit * Wcrit * sources['weight'] * np.cos(
                        2 * thetas)
                    histWeight = np.square(histWeight)
                    ERNerrsH1 = np.histogram(
                        r_dists, bins=self.radii,
                        weights=histWeight)[0]  #add to E(R) errors
                    histWeight = Ecrit * Wcrit * sources['weight'] * np.sin(
                        2 * thetas)
                    histWeight = np.square(histWeight)
                    ERNerrsH2 = np.histogram(
                        r_dists, bins=self.radii,
                        weights=histWeight)[0]  #add to E(R) errors

                    #sum histogram results to lists
                    self.ERnumsQ1 = self.ERnumsQ1 + ERnumsH1
                    self.ERdemsQ1 = self.ERdemsQ1 + ERdemsH1
                    self.ERNerrsQ1 = self.ERNerrsQ1 + ERNerrsH1
                    self.ERnumsQ2 = self.ERnumsQ2 + ERnumsH2
                    self.ERdemsQ2 = self.ERdemsQ2 + ERdemsH2
                    self.ERNerrsQ2 = self.ERNerrsQ2 + ERNerrsH2

                elif self.estimator == 'CJ':

                    # CJ estimator

                    th1 = (thetas + (np.pi / 8)) % (
                        np.pi / 2)  #reduce to single quadrant, for gamma_1
                    th2 = thetas % (np.pi / 2
                                    )  #reduce to single quadrant, for gamma_2

                    #E_1^+ estimator
                    inds = np.logical_and(th1 >= 0, th1 < (np.pi / 4))
                    shears = sources['e1'] * inds
                    weights = sources['weight'] * inds
                    histResult = Ecrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii, weights=shears * weights)[0]
                    self.ERnums1P = self.ERnums1P + histResult
                    histResult = Wcrit * np.histogram(
                        r_dists, bins=self.radii, weights=weights)[0]
                    self.ERdems1P = self.ERdems1P + histResult
                    histResult = Ecrit * Ecrit * Wcrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii,
                        weights=np.square(weights))[0]
                    self.ERNerrs1P = self.ERNerrs1P + histResult

                    #E_1^- estimator
                    inds = np.logical_and(th1 >= (np.pi / 4), th1 <
                                          (np.pi / 2))
                    shears = sources['e1'] * inds
                    weights = sources['weight'] * inds
                    histResult = Ecrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii, weights=shears * weights)[0]
                    self.ERnums1M = self.ERnums1M + histResult
                    histResult = Wcrit * np.histogram(
                        r_dists, bins=self.radii, weights=weights)[0]
                    self.ERdems1M = self.ERdems1M + histResult
                    histResult = Ecrit * Ecrit * Wcrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii,
                        weights=np.square(weights))[0]
                    self.ERNerrs1M = self.ERNerrs1M + histResult

                    #E_2^+ estimator
                    inds = np.logical_and(th2 >= 0, th2 < (np.pi / 4))
                    shears = sources['e2'] * inds
                    weights = sources['weight'] * inds
                    histResult = Ecrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii, weights=shears * weights)[0]
                    self.ERnums2P = self.ERnums2P + histResult
                    histResult = Wcrit * np.histogram(
                        r_dists, bins=self.radii, weights=weights)[0]
                    self.ERdems2P = self.ERdems2P + histResult
                    histResult = Ecrit * Ecrit * Wcrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii,
                        weights=np.square(weights))[0]
                    self.ERNerrs2P = self.ERNerrs2P + histResult

                    #E_2^- estimator
                    inds = np.logical_and(th2 >= (np.pi / 4), th2 <
                                          (np.pi / 2))
                    shears = sources['e2'] * inds
                    weights = sources['weight'] * inds
                    histResult = Ecrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii, weights=shears * weights)[0]
                    self.ERnums2M = self.ERnums2M + histResult
                    histResult = Wcrit * np.histogram(
                        r_dists, bins=self.radii, weights=weights)[0]
                    self.ERdems2M = self.ERdems2M + histResult
                    histResult = Ecrit * Ecrit * Wcrit * Wcrit * np.histogram(
                        r_dists, bins=self.radii,
                        weights=np.square(weights))[0]
                    self.ERNerrs2M = self.ERNerrs2M + histResult

        #run garbage collection to free memory
        del sources[:]
        gc.collect()

    #final calculations and output
    def return_stack(self):
        #convert 0s to 1s to avoid divide-by-zero errors
        self.ERdems[self.ERdems == 0.0] = 1.0
        if self.estimator == 'SCH':
            self.ERdemsQ1[self.ERdemsQ1 == 0.0] = 1.0
            self.ERdemsQ2[self.ERdemsQ2 == 0.0] = 1.0
        elif self.estimator == 'CJ':
            self.ERdems1P[self.ERdems1P == 0.0] = 1.0
            self.ERdems1M[self.ERdems1M == 0.0] = 1.0
            self.ERdems2P[self.ERdems2P == 0.0] = 1.0
            self.ERdems2M[self.ERdems2M == 0.0] = 1.0

        #divide by weight sums to determine weighted average for each bin
        ERAvgs = self.ERnums / self.ERdems  #list of weighted average excess mass density
        ERAvge = np.sqrt(
            self.ERNerrs
        ) / self.ERdems  #list of errors in average excess mass density

        #quadrupole estimators and errors
        if self.estimator == 'SCH':
            ERAvgsQ1 = self.ERnumsQ1 / self.ERdemsQ1  #list of weighted average excess mass density in quadrupole
            ERAvgsQ2 = self.ERnumsQ2 / self.ERdemsQ2  #list of weighted average excess mass density in cross quadrupole

            ERAvgeQ1 = self.sigma_e * np.sqrt(self.ERNerrsQ1) / self.ERdemsQ1
            ERAvgeQ2 = self.sigma_e * np.sqrt(self.ERNerrsQ2) / self.ERdemsQ2

            return ERAvgs, ERAvge, ERAvgsQ1, ERAvgeQ1, ERAvgsQ2, ERAvgeQ2, self.ns, self.npr

        elif self.estimator == 'CJ':
            ERAvgs1P = self.ERnums1P / self.ERdems1P
            ERAvge1P = self.sigma_e * np.sqrt(self.ERNerrs1P) / self.ERdems1P

            ERAvgs1M = self.ERnums1M / self.ERdems1M
            ERAvge1M = self.sigma_e * np.sqrt(self.ERNerrs1M) / self.ERdems1M

            ERAvgs2P = self.ERnums2P / self.ERdems2P
            ERAvge2P = self.sigma_e * np.sqrt(self.ERNerrs2P) / self.ERdems2P

            ERAvgs2M = self.ERnums2M / self.ERdems2M
            ERAvge2M = self.sigma_e * np.sqrt(self.ERNerrs2M) / self.ERdems2M

            return ERAvgs, ERAvge, ERAvgs1P, ERAvge1P, ERAvgs1M, ERAvge1M, ERAvgs2P, ERAvge2P, ERAvgs2M, ERAvge2M, self.ns, self.npr
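
# A minimal usage sketch (hypothetical paths and lens row): stack one lens and
# read back the monopole and quadrupole profiles. data_path must contain
# sigma_crit_inv.csv and the source catalogue named by source_path.
radii = [100., 200., 400., 800., 1600.]  # radial bin edges in kpc
stacker = Stacker(radii, 'SCH', data_path='data/', source_path='sources.csv')
lens = {'RA': 150.0, 'DEC': 2.0, 'z': 0.3, 'A': 1.2, 'B': 0.8, 'THETA': 30.0}
stacker.stack_shear(lens)
ERAvgs, ERAvge, Q1, Q1e, Q2, Q2e, ns, npr = stacker.return_stack()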
Example #10
import numpy
import hk_tool_box
import warnings
from sklearn.cluster import KMeans
from astropy.cosmology import FlatLambdaCDM
from astropy.coordinates import SkyCoord
from astropy import units
import time
import hk_healpy_tool

warnings.filterwarnings('error')

# parameters
# cosmological parameters
omega_m0 = 0.315
H0 = 67.5
cosmos = FlatLambdaCDM(H0, omega_m0)

# separation bin, comoving or angular diameter distance in unit of Mpc/h
sep_bin_num = 25
bin_st, bin_ed = 0.02, 100
separation_bin = hk_tool_box.set_bin_log(bin_st, bin_ed,
                                         sep_bin_num + 1).astype(numpy.float32)
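# (equivalently, assuming hk_tool_box.set_bin_log returns log-spaced edges:
#  numpy.geomspace(bin_st, bin_ed, sep_bin_num + 1))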

# bin number for ra & dec of each exposure
deg2arcmin = 60
deg2rad = numpy.pi / 180

# chi guess bin for PDF_SYM
delta_sigma_guess_num = 50
num_m = 25
num_p = delta_sigma_guess_num - num_m
VER = min_ver
total = max_ver - min_ver
# pbr.printProgress(0,total,prefix='Progress:',barLength=100)

for VER in range(min_ver, max_ver + 1):

    alpha_x_path = '/home/lkit/wslap/data/A370/reconstruction/alpha_x_rad/recomp_alpha_x_rad_VER' + str(
        VER) + '.fits'  # type full path
    alpha_y_path = '/home/lkit/wslap/data/A370/reconstruction/alpha_y_rad/recomp_alpha_y_rad_VER' + str(
        VER) + '.fits'
    output_path_fig = '/home/lkit/tmp/critcurve_map_sys9_30mas_VER' + str(
        VER) + '.fits'  # type output path
    output_path_data = '/home/lkit/magnification/VER' + str(VER)

    cosmo = FlatLambdaCDM(H0=70., Om0=0.3)

    im = pyfits.open(alpha_x_path)
    alpha_x = im[0].data / math.pi * 180. * 3600. / pix_scale
    im.close()

    im = pyfits.open(alpha_y_path)
    alpha_y = im[0].data / math.pi * 180. * 3600. / pix_scale
    im.close()

    mu = Mag(src_z, alpha_x, alpha_y)

    # coor1,4,6,9
    # coor=[[[484,781],[715,762],[758,755]],
    #  [[383,770],[652,774],[915,724]],
    #  [[386,748],[690,744],[842,714]],
Example #12
# imports assumed by this snippet (truncated in the original excerpt);
# comm is taken to be the standard mpi4py world communicator
import numpy
import galsim
from sys import argv
from mpi4py import MPI
from astropy.coordinates import SkyCoord
from astropy import units
from astropy.cosmology import FlatLambdaCDM

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
numprocs = comm.Get_size()

mg_bin_num = int(argv[1])

# cosmology
omega_m0 = 0.31
omega_lam0 = 1 - omega_m0
h = 0.6735
C_0_hat = 2.99792458
H_0 = 100 * h
coeff = 1000 * C_0_hat / h

coeff_crit = C_0_hat**2 / 4 / numpy.pi / 6.674

cosmos = FlatLambdaCDM(H_0, Om0=omega_m0)

# Halo parameters
Mass = 5 * 10**12.5  #M_sun/h
conc = 3.5  #concentration
len_z = 0.3  #redshift
halo_position = galsim.PositionD(0, 0)  #arcsec
com_dist_len = cosmos.comoving_distance(len_z).value * h  #Mpc/h
# print("Lens plane at z = %.2f, %.5f Mpc/h"%(len_z, com_dist_len) )
len_pos = SkyCoord(ra=0 * units.arcsec, dec=0 * units.arcsec, frame="fk5")

# lens profile
nfw = galsim.NFWHalo(Mass, conc, len_z, halo_position, omega_m0, omega_lam0)

shear_tag = 0
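
# A minimal sketch of evaluating the lens, assuming galsim's NFWHalo shear API:
# shear and convergence 30 arcsec from the halo centre for a source at z_s = 0.8.
g1, g2 = nfw.getShear(galsim.PositionD(30.0, 0.0), z_s=0.8)
kappa = nfw.getConvergence(galsim.PositionD(30.0, 0.0), z_s=0.8)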
Example #13
import pandas as pd
import numpy  as np

from   astropy.cosmology import FlatLambdaCDM


##
h               = 0.673
H0              = 100. * h

cosmo           = FlatLambdaCDM(H0=H0, Om0=0.3)

area            = 14.e3

##  fNL.
##  log10nbar   = -4

##  RSD.
log10nbar       = np.log10(3.e-4)

##                                                                                                                                                         
print('\n\nWelcome to figure-of-merit (n = %le) .\n\n' % log10nbar)

zmin            =   2.0
zmax            =   5.0

volume          = (cosmo.comoving_volume(zmax).value - cosmo.comoving_volume(zmin).value) 
volume         *=  h **3.

fsky            = area / 41252.96
ngal            = 10. ** log10nbar * fsky * volume
def sysmat(
    base_output,
    fitopt="_",
    muopt="_",
    topdir="",
    do_each=0,
    sysfile="NONE",
    output_dir="COSMO",
    sysdefault=1,
    remove_extra=True,
    covlines="",
    topfile="NONE",
    errscales="NONE",
    subdir="*",
):
    import numpy as np
    import array
    import math
    import os
    from astropy import cosmology as cosmo
    from astropy.cosmology import FlatLambdaCDM
    import re

    if not output_dir:
        output_dir = "COSMO"
    if not sysdefault:
        sysdefault = 1
    if not remove_extra:
        remove_extra = True

    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    print(len(covlines))
    sysnum = len(covlines)
    if covlines == "NONE" or covlines == [[]]:
        sysnum = 0
    co = 0
    sys_ratio = 1
    print("subdir", subdir)
    print("topdir", topdir)
    print("fitopt", fitopt)
    print("muopt", muopt)
    print("base_output", base_output)
    print("ls " + topdir + "/" + subdir + "/*" + fitopt + "*" + muopt + "*" +
          "M0DIF")
    os.system("ls " + topdir + "/" + subdir + "/*" + fitopt + "*" + muopt +
              "*" + "M0DIF > " + base_output + ".list")
    print("subdir", subdir)
    print("topdir", topdir)
    print("Created list of all M0DIF files called " + base_output + ".list")
    if not os.path.isfile(base_output + ".list"):
        print(
            "List file does not exist. No M0DIF files!!! This makes me sad!!! I'm done here!!"
        )
        return 0
    if os.stat(base_output + ".list").st_size == 0:
        print(
            "List exists but is empty. No M0DIF files!!! This makes me sad!!! I'm done here!!"
        )
        return 0

    if not os.path.exists(topdir + "/SALT2mu_FITSCRIPTS/FITJOBS_SUMMARY.LOG"):
        print(topdir + "/SALT2mu_FITSCRIPTS/FITJOBS_SUMMARY.LOG")
        print(
            "Log file not there. No M0DIF files!!! This makes me sad!!! I'm done here!!"
        )
        return 0

    if os.path.isfile(base_output + ".list"):
        print("##### I AM OPENING THIS FILE FOR READ: " + base_output +
              ".list")
        file_lines = open(base_output + ".list", "r").readlines()
    if os.path.isfile(topdir + "/SALT2mu_FITSCRIPTS/FITJOBS_SUMMARY.LOG"):
        log_lines = open(topdir + "/SALT2mu_FITSCRIPTS/FITJOBS_SUMMARY.LOG",
                         "r").readlines()
    print(topdir + "/SALT2mu_FITSCRIPTS/FITJOBS_SUMMARY.LOG")

    filesize = len(file_lines)  # read in number of M0DIF files
    print("Total of " + str(filesize) + " M0DIF files")

    MUOPT_var1 = []
    MUOPT_var2 = []

    FITOPT_var1 = []
    FITOPT_var2 = []

    SYSOPT_var1 = []
    SYSOPT_var2 = []
    SYSOPT_var3 = []

    INPDIR1 = []

    for xco in range(0, len(log_lines)):
        if "MUOPT:" in log_lines[xco]:
            mu_split = log_lines[xco].split()
            print(mu_split)
            MUOPT_var1 = np.append(MUOPT_var1, "MUOPT" + mu_split[1])
            MUOPT_var2 = np.append(MUOPT_var2, mu_split[2][1:-1])

        if "INPDIR+:" in log_lines[xco]:
            mu_split = log_lines[xco].split()
            INPDIR1 = np.append(INPDIR1, mu_split[1])

    print(INPDIR1[0] + "/FITOPT.README")
    # stop
    # stop
    if not os.path.isfile(INPDIR1[0] + "/FITOPT.README"):
        print("No FITOPT README in !!!" + INPDIR1[0] +
              "/FITOPT.README This makes me sad!!! Im done here!!")
        return 0
    if os.path.isfile(INPDIR1[0] + "/FITOPT.README"):
        fit_lines = open(INPDIR1[0] + "/FITOPT.README", "r").readlines()

    for xco in range(0, len(fit_lines)):
        if "FITOPT:" in fit_lines[xco]:
            mu_split = fit_lines[xco].split()
            FITOPT_var1 = np.append(FITOPT_var1, "FITOPT" + mu_split[1])
            FITOPT_var2 = np.append(FITOPT_var2, mu_split[2][1:-1])

    if (os.path.isfile(sysfile) & (sysfile != "NONE") &
        (errscales == "NONE")) | ((sysfile == "NONE") & (errscales != "NONE")):
        if os.path.isfile(sysfile) & (sysfile != "NONE") & (errscales
                                                            == "NONE"):
            if (os.path.isfile(sysfile) == False) & (sysfile != "NONE"):
                print("That " + sysfile +
                      " doesn't exist.  Grrrr.  Have to leave")

            sys_lines = open(sysfile, "r").readlines()
        if (sysfile == "NONE") & (errscales != "NONE"):
            sys_lines = errscales
        print("syslines", sys_lines)
        # stop
        for xco in range(0, len(sys_lines)):
            if "ERRSCALE:" in sys_lines[xco]:
                mu_split = sys_lines[xco].split()
                SYSOPT_var1 = np.append(SYSOPT_var1, mu_split[1])
                SYSOPT_var2 = np.append(SYSOPT_var2, mu_split[2])
                SYSOPT_var3 = np.append(SYSOPT_var3, mu_split[3])

    if (sysfile == "NONE") & (errscales == "NONE"):
        print(
            "WARNING: All systematics have default scaling with no cuts.  This is really dangerous!"
        )

        SYSOPT_var1 = []
    if (sysfile != "NONE") & (errscales != "NONE"):
        print(
            "You have a list of systematics in your inFile and in your included file.  That is one too many lists.  We have to stop"
        )

    xco = 0
    if topfile != "NONE":
        topfile = file_lines[xco][:-33] + topfile
    if (topfile == "NONE") | (topfile == "") | (topfile == "None"):
        topfile = file_lines[xco][:-1]

    skipc = linef(topfile, "VARNAMES")
    if topfile != "":
        z1, mu1, mu1e = np.loadtxt(topfile,
                                   usecols=(4, 5, 6),
                                   unpack=True,
                                   dtype="str",
                                   skiprows=skipc + 1)
    if topfile == "":
        z1, mu1, mu1e = np.loadtxt(topfile,
                                   usecols=(4, 5, 6),
                                   unpack=True,
                                   dtype="str",
                                   skiprows=skipc + 1)
    print("topfile", topfile)
    mu1 = mu1.astype(float)
    mu1e = mu1e.astype(float)
    z1 = z1.astype(float)
    # xxa=[mu1e<90]

    xxa = mu1e < np.inf  # CHANGED BY DILLON HERE to get covmats all the same size for multiple sims
    z1 = z1[xxa]
    mu1 = mu1[xxa]
    mu1e = mu1e[xxa]
    cosmo2 = FlatLambdaCDM(H0=70, Om0=0.3)
    x = cosmo2.luminosity_distance(z1).value
    mu_syn = 5.0 * (np.log10(x)) + 25.0 - 19.35
    mu_syn1 = mu_syn + mu1
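    # mu_syn is the apparent magnitude predicted by the reference cosmology
    # (distance modulus 5*log10(d_L/Mpc) + 25 with an assumed absolute
    # magnitude of -19.35); adding the M0DIF offsets mu1 gives the model
    # magnitudes mu_syn1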

    f1 = open(output_dir + "/lcparam_" + base_output + ".txt",
              "w")  # this is the file for cosmomc
    f1.write(
        "#name zcmb zhel dz mb dmb x1 dx1 color dcolor 3rdvar d3rdvar cov_m_s cov_m_c cov_s_c set ra dec biascor \n"
    )  # standard format
    for x in range(0, len(z1)):
        f1.write(
            str(x) + " " + str(z1[x]) + " " + str(z1[x]) + " 0.0 " +
            str(mu_syn1[x]) + " " + str(mu1e[x]) +
            " 0 0 0 0 0 0 0 0 0 0 0 0\n")
    f1.close()
    bigmatmm = np.zeros((len(z1), len(z1), sysnum + 1)) + 0.000000

    logf = open(output_dir + "/" + base_output + ".log", "w")
    for xco in range(0, len(file_lines)):
        print(file_lines[xco].split("_")[-2],
              file_lines[xco].split("_")[-1][:-7])
        # SALT2mu_SNLS+SDSS+LOWZ+PS1_Scolnic2+HST/DS17/SALT2mu_FITOPT000_MUOPT000.M0DIF
        # stop
        xx1 = FITOPT_var1 == file_lines[xco].split("_")[-2]
        xx2 = MUOPT_var1 == file_lines[xco].split("_")[-1][:-7]
        skipc = linef(file_lines[xco][:-1], "VARNAMES")
        z2, mu2, mu2e = np.loadtxt(file_lines[xco][:-1],
                                   usecols=(4, 5, 6),
                                   unpack=True,
                                   dtype="str",
                                   skiprows=skipc + 1)
        print(file_lines[xco][:-1])
        mu2 = mu2.astype(float)
        mu2e = mu2e.astype(float)
        z2 = z2.astype(float)
        # xxa=[mu2e<900000]
        z2 = z2[xxa]
        mu2 = mu2[xxa]
        mu2e = mu2e[xxa]

        cosmo2 = FlatLambdaCDM(H0=70, Om0=0.3)
        x = cosmo2.luminosity_distance(z1).value
        mu_syn2 = 5.0 * (np.log10(x)) + 25.0 - 19.35
        print(len(z1), len(z2), len(mu1), len(mu_syn2), len(mu2))
        # 35 32 35 35 32
        mu_syn2 = mu_syn2 + mu2
        xxb = (z1 == 0) | (z2 == 0)
        if len(z2[xxb]) > 0:
            mu_syn2[xxb] = mu_syn1[xxb]
        sys_ratio = float(sysdefault)
        print("sysopt", SYSOPT_var1)
        print(FITOPT_var2)
        # stop
        if len(SYSOPT_var1) > 0:
            comatch = 0
            for y1 in range(0, len(SYSOPT_var1)):
                filtered1 = fnmatch.filter([FITOPT_var2[xx1][0]],
                                           SYSOPT_var1[y1])
                filtered2 = fnmatch.filter([MUOPT_var2[xx2][0]],
                                           SYSOPT_var2[y1])
                if (len(filtered1) > 0) & (len(filtered2) > 0):
                    print("sys", SYSOPT_var3)
                    sys_ratio = float(SYSOPT_var3[y1])
                    # print sys_ratio
                    # stop
                    print("Have a systematic from " + str(SYSOPT_var1[y1]) +
                          str(SYSOPT_var2[y1]) + " of " + str(SYSOPT_var3[y1]))
                    logf.write("Have a systematic from " +
                               str(SYSOPT_var1[y1]) + str(SYSOPT_var2[y1]) +
                               " of " + str(SYSOPT_var3[y1]) + "\n")
                    # stop
                    if comatch > 0:
                        print(
                            "WARNING: multiple systematics matched!!! That is bad"
                        )
                    comatch = comatch + 1

                    # if ((np.amax(np.absolute(z1-z2)/z1)>0.1)&(sys_ratio>0)):

                    #            print z1-z2
                    #            print np.absolute(z1-z2)/z1
                    #            print 'There is a misalignment of z bins!!! We have to stop!'
                    #            print file_lines[xco][:-1]
                    #            print z1[0], z2[0], z1[0]
                    #            stop

        # if 'SALT2' in FITOPT_var2[xx1][0]:
        # print sys_ratio
        # stop
        dm2 = mu_syn1 - mu_syn2
        dm2 = np.multiply(dm2, sys_ratio)
        dmm = np.outer(dm2, dm2)
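        # accumulate the systematic covariance: C += (sys_ratio * dmu)(sys_ratio * dmu)^T,
        # summed over every FITOPT/MUOPT variant relative to the baseline fit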

        # stop
        x = 0
        bigmatmm[:, :, x] = np.add(bigmatmm[:, :, x], np.multiply(dmm, 1.0))
        print("bigmat", bigmatmm[1, 1, 0], dmm[1, 1])
        if dmm[1, 1] > 0.3:
            print(file_lines[xco])
        print("covlines all", covlines)
        print("sysnum", sysnum + 1)
        # stop
        for x in range(1, sysnum + 1):
            print(x)
            print("covlines", covlines[x - 1])
            syscheck1 = covlines[x - 1][1][1:-1].split(",")[0]
            syscheck2 = covlines[x - 1][1][1:-1].split(",")[1]
            sys_flag1 = False
            sys_flag2 = False
            if syscheck1[0] == "-":
                sys_flag1 = syscheck1[1:] not in FITOPT_var2[xx1][0]
            if syscheck1[0] == "+":
                sys_flag1 = syscheck1[1:] in FITOPT_var2[xx1][0]
            if syscheck1[0] == "=":
                sys_flag1 = syscheck1[1:] == FITOPT_var2[xx1][0]
            if syscheck2[0] == "-":
                sys_flag2 = syscheck2[1:] not in MUOPT_var2[xx2][0]
            if syscheck2[0] == "+":
                sys_flag2 = syscheck2[1:] in MUOPT_var2[xx2][0]
            if syscheck2[0] == "=":
                sys_flag2 = syscheck2[1:] == MUOPT_var2[xx2][0]
            if syscheck1[0] == "-":
                print(sys_flag1)
                print(sys_flag2)
                print(FITOPT_var2[xx1][0], MUOPT_var2[xx2][0],
                      (sys_flag1) & (sys_flag2))
                # stop
            if (sys_flag1) & (sys_flag2):
                logf.write(FITOPT_var2[xx1][0] + " " + MUOPT_var2[xx2][0] +
                           " " + syscheck1[0:] + " " + syscheck2[0:] + " " +
                           str(x) + " " + str(sys_ratio) + " \n")
                bigmatmm[:, :, x] = np.add(bigmatmm[:, :, x],
                                           np.multiply(dmm, 1.0))

        co = co + 1
    for z in range(0, (sysnum + 1)):
        gmm = open(output_dir + "/sys_" + base_output + "_" + str(z) + ".txt",
                   "w")
        gmm.write(str(len(z1)) + "\n")
        for x in range(0, len(z1)):
            for y in range(0, len(z1)):
                gmm.write("%.8f\n" % bigmatmm[x, y, z])
        gmm.close()
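    # each sys_<base_output>_<z>.txt holds the matrix dimension on its first
    # line, then the N*N covariance elements one per line in row-major order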

    dataset(output_dir, base_output, "_0", "", sys=1)
    dataset(output_dir, base_output, "_nosys", "", sys=0)
    print("sysnum", sysnum)
    # stop
    for z in range(1, sysnum + 1):
        temp = dataset(output_dir, base_output, "_" + str(z), "", sys=1)
    print("just did it")
    logf.close()
    return 2
Exemplo n.º 15
0
import numpy as np
from numpy import pi
from source_mu import Fisher, SNR
from astropy.cosmology import FlatLambdaCDM
from multiprocessing import Pool, cpu_count
from functools import partial
import h5py
import time
from scipy.interpolate import interp1d
""" Geometricised Units; c=G=1, Mass[1500 meter/Solar Mass], Distance[meter], Time[3.0e8 meter/second]"""

"""Parameters: (ln Mc_z, ln(Lambda),tc,Phi_c,ln d)"""
cosmo_true=FlatLambdaCDM(70.5,0.2736)
# standard siren (EOS: SLY)
m1_SLY = m2_SLY = 1.433684*1500. 	  	 
Lambda_SLY=2.664334e+02
PN_order=3.5
dl_dM_SLY=(3.085067e+02-1.997018e+02)/((1.433684e+00  -1.493803)*1500.*2.)
sm1=sm2=0.09*1500.
Z_true=1.5

N=100000
z_Peak=2.5
z_Max=10.
#load data
m,L=np.loadtxt('Mass_Vs_TidalDeformability_SLY.txt',usecols=(0,1),unpack=True)
m*=1500.
m_l=min(m)
m_u=max(m)
mass=interp1d(L,m,kind='cubic')
Lamb=interp1d(m,L,kind='cubic')
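
# usage sketch (illustrative values; valid only inside the tabulated range
# [m_l, m_u] and its Lambda counterpart):
#   Lambda_14 = Lamb(1.4 * 1500.)   # tidal deformability at 1.4 Msun
#   m_of_L    = mass(300.)          # mass whose deformability is 300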
Exemplo n.º 16
0
import numpy as np
from numpy import pi
from scipy import stats
from scipy import special
from scipy.interpolate import interp1d
from source_mu import Fisher,SNR
#from source import SNR
from astropy.cosmology import FlatLambdaCDM
from astropy.cosmology import z_at_value
from matplotlib import pyplot as ppl
cosmo=FlatLambdaCDM(70.5,0.2736)
from scipy.optimize import curve_fit
from scipy.optimize import fsolve
from scipy.optimize import root
##import corner
#from scipy import stats
from scipy import interpolate
import scipy.integrate as integrate
from scipy.special import erfc
#from Selection import P_det_H0
from joblib import Parallel, delayed
import multiprocessing
import math
""" Geometricised Units; c=G=1, Mass[1500 meter/Solar Mass], Distance[meter], Time[3.0e8 meter/second]"""

"""Parameters: (ln Mc_z, ln(Lambda),tc,Phi_c,ln d)"""

# standard siren (EOS: SLY)
m1_SLY = m2_SLY = 1.433684*1500. 	  	 
Lambda_SLY=2.664334e+02
PN_order=3.5
import MultiDark as MD
import sys
import glob
import os
import numpy as n
import astropy.io.fits as fits

from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
cosmoMD = FlatLambdaCDM(H0=67.77 * u.km / u.s / u.Mpc,
                        Om0=0.307115,
                        Ob0=0.048206)

snList = n.array(
    glob.glob(os.path.join(os.environ["MD40"], "snapshots", "out_*.list")))
snList.sort()


def get_first(path_2_snapshot):
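    """Parse a snapshot out_*.list file: read the expansion factor aexp from
    the '#a = ...' header line, then return the first data row (split into
    fields) together with aexp."""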
    fl = MD.fileinput.input(path_2_snapshot)
    for line in fl:
        if line[0] == "#":
            outt = 1  #print line
            if line[1] == "a":
                aexp = float(line[5:])
        else:
            fl.close()
            return line.split(), aexp


print "p snapshots"
Exemplo n.º 18
0
#!/usr/bin/env python

import numpy
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import ticker, cm
import scipy.integrate as integrate
from astropy.cosmology import FlatLambdaCDM
import astropy
matplotlib.rcParams['font.size'] = 14
matplotlib.rcParams['lines.linewidth'] = 2.0

# cosmology
OmegaM0 = 0.3
cosmo = FlatLambdaCDM(H0=100, Om0=OmegaM0)

gamma = 0.55
sigma_v = 300.

# the code is hard-wired; changing these numbers requires surgery
zmax = 0.2
nbins = 2
binwidth = zmax / nbins
bincenters = binwidth / 2 + binwidth * numpy.arange(nbins)

# power spectrum from CAMB
matter = numpy.loadtxt(
    "/Users/akim/project/PeculiarVelocity/pv/dragan/matterpower.dat")

# vv
# f=OmegaM0**0.55
import os
import numpy as np
import astropy.units as u
from astropy.cosmology import FlatLambdaCDM

# `two` is a directory path defined earlier in the original (truncated) script
if os.path.isdir(two):
    print('2nd directory exists!')
else:
    print('Uh oh! Second directory does not exist!')

galaxies = [
    'J0826', 'J0901', 'J0905', 'J0944', 'J1107', 'J1219', 'J1341', 'J1506',
    'J1558', 'J1613', 'J2116', 'J2140'
]
filters = ['F475W', 'F814W']
dependency = ['dep1', 'dep2']
redshifts = [
    0.603, 0.459, 0.712, 0.514, 0.467, 0.451, 0.451, 0.658, 0.608, 0.402,
    0.449, 0.728, 0.752
]
cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Om0=0.3)


#minmax function used to set axis boundaries
def minmax(values):
    coords = np.zeros(2)
    coords[0] = np.min(values) - 0.1
    coords[1] = np.max(values) + 0.1
    return coords


# function to add text to plots
def addtext(xvals, yvals):
    med = np.median(yvals)
    mean = np.mean(yvals)
Exemplo n.º 20
0
'''
constants used throughout project
'''

import numpy as np
from astropy.cosmology import FlatLambdaCDM

RERUN_ANALYSIS = False

## set cosmology to Planck 2018 Paper I Table 6
cosmo = FlatLambdaCDM(H0=67.32, Om0=0.3158, Ob0=0.03324)

boss_h = 0.676  ## h that BOSS uses.
h = 0.6732  ## planck 2018 h
eta_star = cosmo.comoving_distance(
    1059.94
).value  ## z_drag from Planck 2018 cosmology paper Table 2, all Planck alone
rs = 147.09  ## try rs=r_drag from Planck 2018 same table as z_drag above
lstar = np.pi * eta_star / rs
dklss = np.pi / 19.  ##width of last scattering -- see  Bo & David's paper.
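## lstar = pi * eta_star / rs is the multipole of the acoustic scale;
## dklss approximates the corresponding width of last scattering.
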
import matplotlib.gridspec as gridspec
#gs = gridspec.GridSpec(13,12)
import matplotlib
from matplotlib import font_manager
import numpy as np
from astropy.io import ascii
from astropy.cosmology import FlatLambdaCDM
import matplotlib.pyplot as plt
from load_and_smooth_map import *
'''Plot the voids and galaxies on top of slices of the map.  Each slice is separated by 2 h^{-1} Mpc.
Use integer division to find galaxies in each slice: select all galaxies between a given RA slice and the next one.'''

# Map parameters and cosmology
cosmo = FlatLambdaCDM(H0=70, Om0=0.31)
ra0 = 149.975
dec0 = 2.15
zmid = 2.35
dec1 = 2.4880706
deg_per_hMpc = 1. / cosmo.h / cosmo.comoving_distance(
    zmid).value * 180. / np.pi
dist_to_center = cosmo.comoving_distance(zmid).value * cosmo.h
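# small-angle relation: a comoving length of 1 Mpc/h at zmid subtends
# (1/h) / D_C(zmid) radians, converted to degrees above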

# Load the map
mapfile = 'map_2017tmp_sm2.0.bin'
map_smoothed, allx, ally, allz = load_and_smooth_map(mapfile, (48, 48, 680))

# Load the galaxies and convert coordinates to h^{-1} Mpc
galaxies = ascii.read('galxcorr_cl2016_v0_with_vuds.dat')
galaxies_x = np.cos(
    0.5 * (dec0 + dec1) * np.pi / 180.) * (galaxies['ra'] - ra0) / deg_per_hMpc
galaxies_y = (galaxies['dec'] - dec0) / deg_per_hMpc
Exemplo n.º 22
0
def setup(self):
    z_L = 0.8
    z_S = 3.0
    from astropy.cosmology import FlatLambdaCDM
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05)
    self.lensCosmo = LensCosmo(z_L, z_S, cosmo=cosmo)
Exemplo n.º 23
0
import pycrates                     # import crates ciao routines

import sys, os
import logging
import time
import subprocess
import numpy as np
from scipy.interpolate import interp1d
from numpy.random import poisson

import fit
import preAnalysis

# --- cosmology
from astropy.cosmology import FlatLambdaCDM  # needed below, missing from the fragment's imports
import astropy.units as u  # used in AngularDistance below

h = 0.7
cosmo = FlatLambdaCDM(H0=h*100, Om0=0.3)

#--- constants
Msol = 1.98847e33
DEG2RAD=np.pi/180.0
kpc_cm = 3.086e21

# basic functions
def AngularDistance(z):
    DA = float( (cosmo.luminosity_distance(z)/(1+z)**2)/u.Mpc )  # in Mpc
    return DA
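
# (AngularDistance uses distance duality, D_A = D_L/(1+z)^2, to convert
# astropy's luminosity distance into an angular-diameter distance)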

#--- Conversion function: kpc to physical
def kpcToPhy(radius,z,ARCSEC2PHYSICAL=0.492):
    DA = AngularDistance(z)
    radius = radius/1000 # Mpc
Exemplo n.º 24
0
import random
import numpy as np
import astropy.units as u
from astropy.io import fits
from astropy.table import Table
from astropy.cosmology import FlatLambdaCDM, z_at_value


def find_nearest(array, value):
    # header and imports reconstructed: the fragment begins mid-function and
    # the call sites below use find_nearest(photoz, z)
    idx = (np.abs(array - value)).argmin()
    return idx, array[idx]

#hdul = fits.open('truth_hpix_Chinchilla-0Y3_v1.6_truth.31.fits')
hdul = fits.open('../catalogs/truth_hpix_Chinchilla-0Y3_v1.6_truth.31.fits')
evt_data = Table(hdul[1].data)
photoz = evt_data['Z']
ra = evt_data['RA']
dec = evt_data['DEC']
galaxy_ID = evt_data['ID']
#print(galaxy_ID)

outfile = open('gauss_dist_609Mpc_sims_o3.txt', 'w')
outfile.write('#ID, galaxyID, RA, DEC, DIST, DIST_ERR\n')

cosmo = FlatLambdaCDM(H0=67.8, Om0=0.308)

D = [] 
i = 100
for i in range(0,10): #how many events you want 
    D_evento3=random.gauss(609.9,200) #mean dist from https://arxiv.org/pdf/1709.08079.pdf, O3 avg dist.
    #print(D_evento3)
    z=z_at_value(cosmo.luminosity_distance, D_evento3*u.megaparsec)
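    # z_at_value numerically inverts cosmo.luminosity_distance to find the
    # redshift whose model distance matches the drawn event distance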
    
    
    bcc_nearest = find_nearest(photoz, z) #find the z in the catalog that is nearest to the calculated z from event
    bcc_z = bcc_nearest[1]
    #print(bcc_z)
    RA = np.asarray(ra)[bcc_nearest[0]]
    DEC = np.asarray(dec)[bcc_nearest[0]]
    galaxyID = np.asarray(galaxy_ID)[bcc_nearest[0]]
Exemplo n.º 25
0
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as colormap

from astropy.cosmology import FlatLambdaCDM
from astropy import units as u

from scipy.interpolate import interp1d
from scipy.optimize import curve_fit

import pandas as pd

import gc

# Set a function that returns the redshift for a given look-back time
model = FlatLambdaCDM(H0=70, Om0=0.3)
z = np.linspace(0.0, 20.0, 1000)
t = model.lookback_time(z)

redshift_from_time = interp1d(t.value, z)
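
# e.g. redshift_from_time(1.0) returns the redshift at a lookback time of
# 1 Gyr (lookback_time yields Gyr, so t.value is in Gyr)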


def analytical_mass_metallicity(z, stellar_mass):
    """Analytical mass-metallicty relationship as computed by Maiolino 2008

    Args:
        z: redshift, valid values are 0.07, 0.7, 2.2 and 3.5
        stellar_mass: stellar mass in MSun

    Returns:
        Oxygen abundance in 12 + log10(O/H)
    """
from scipy.interpolate import splev, splrep
import emcee 
import corner

from multiprocessing import Pool
import pickle

import os
import sys
import socket
import time
import datetime as dt

from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
astropy_cosmo = FlatLambdaCDM(H0=70 * u.km / u.s / u.Mpc, Tcmb0=2.725 * u.K, Om0=0.3)

import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec

start = time.time()
print("Starting at:", dt.datetime.now())

# Assign directories and custom imports
home = os.getenv('HOME')
stacking_utils = home + '/Documents/GitHub/stacking-analysis-pears/util_codes/'

roman_slitless_dir = home + "/Documents/GitHub/roman-slitless/"
ext_spectra_dir = home + "/Documents/roman_slitless_sims_results/"
roman_sims_seds = home + "/Documents/roman_slitless_sims_seds/"
emcee_diagnostics_dir = home + "/Documents/emcee_runs/emcee_diagnostics_roman/"
Exemplo n.º 27
0
from __future__ import (division, print_function, absolute_import)

import re
import glob
import os
import numpy as np
from astropy.table import Table
from astropy.io import ascii
from scipy.interpolate import interp1d
from warnings import warn

from utils import table_to_array

from default import PROJECT_DIRECTORY
from astropy.cosmology import FlatLambdaCDM
default_cosmo = FlatLambdaCDM(H0=70, Om0=0.3)

__all__ = ['HaloProps', 'Orphans', 'PWGH', 'MAH', 'Continued_Growth']


class HaloProps(object):
    """
    class to carry over halo properties to galaxy mock
    """
    def __init__(self,
                 haloprop_keys=[
                     'halo_mpeak', 'halo_vpeak', 'halo_acc_scale',
                     'halo_half_mass_scale'
                 ],
                 **kwargs):
        """
Exemplo n.º 28
0
def swap_slct(slct, val1, val2):  # hypothetical name: the fragment begins mid-function
    """swaps the selected rows of val1 and val2"""
    tmp = val1[slct]
    val1[slct] = val2[slct]
    val2[slct] = tmp


def rot_slct(slct, val1, val2, val3):
    """moves the selected rows in val1 into val2, val2 into val3, and val3
    into val1
    """
    tmp = val1[slct]
    val1[slct] = val3[slct]
    val3[slct] = val2[slct]
    val2[slct] = tmp


import sys
import time
import h5py
import numpy as np
from scipy.interpolate import interp1d
from astropy.cosmology import FlatLambdaCDM
import dtk  # local utility module used throughout this fragment

start_time = time.time()
cosmoAQ = FlatLambdaCDM(H0=71.0, Om0=0.2648)

param = dtk.Param(sys.argv[1])

output = param.get_string("output")
use_mr_cut = param.get_bool('use_mr_cut')
mr_cut = param.get_int('mr_cut')
gltcs_file_list = param.get_string_list('gltcs_file_list')
lc_rot_info_loc = param.get_string('lc_rot_info_loc')
box = param.get_bool('box')
gltcs_file = gltcs_file_list[0]
gltcs_param_group = h5py.File(gltcs_file, 'r')["Parameters"]
output_mod = output.replace('.hdf5', '_mod.hdf5')
stepz = dtk.StepZ(200, 0, 500)
zs = np.linspace(0, 1.5, 1000)
z_to_dl = interp1d(zs, cosmoAQ.luminosity_distance(zs))
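# pre-tabulating d_L(z) on a grid avoids repeated astropy distance
# integrals in later lookups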
def main():

    use_vmax_weights = True

    # load sample selection
    from astropy.table import Table
    fpath = '../data/SDSS_Main/'
    fname = 'sdss_vagc.hdf5'
    t = Table.read(fpath+fname, path='data')

    from astropy.cosmology import FlatLambdaCDM
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05, Tcmb0=2.7255)

    disks = t['FRACPSF'][:,2] < 0.8
    ellipticals = t['FRACPSF'][:,2] >= 0.8

    # make completeness cut
    from estimate_completeness import z_lim
    zz = z_lim(t['ABSMAG_r0.1'], cosmo)
    comp_mask = (t['Z'] <= zz)

    mask_1 = (t['ABSMAG_r0.1'] > -18) & (t['ABSMAG_r0.1'] <= -17) & comp_mask 
    mask_2 = (t['ABSMAG_r0.1'] > -19) & (t['ABSMAG_r0.1'] <= -18) & comp_mask 
    mask_3 = (t['ABSMAG_r0.1'] > -20) & (t['ABSMAG_r0.1'] <= -19) & comp_mask 
    mask_4 = (t['ABSMAG_r0.1'] > -21) & (t['ABSMAG_r0.1'] <= -20) & comp_mask 
    mask_5 = (t['ABSMAG_r0.1'] > -22) & (t['ABSMAG_r0.1'] <= -21) & comp_mask 
    mask_6 = (t['ABSMAG_r0.1'] > -23) & (t['ABSMAG_r0.1'] <= -22) & comp_mask 

    N_1 = np.sum(mask_1)
    N_2 = np.sum(mask_2) 
    N_3 = np.sum(mask_3) 
    N_4 = np.sum(mask_4) 
    N_5 = np.sum(mask_5)
    N_6 = np.sum(mask_6)
    print('number of galaxies in samples 1-6:')
    print(N_1, N_2, N_3, N_4, N_5, N_6)

    # calculate vmax for each galaxy
    from astropy.cosmology import FlatLambdaCDM
    cosmo = FlatLambdaCDM(H0=70, Om0=0.3, Ob0=0.05, Tcmb0=2.7255)
    
    if use_vmax_weights:
        from estimate_completeness import vmax as vmax_func
        vmax = vmax_func(t['ABSMAG_r0.1'], cosmo)
    else:
        vmax = np.ones(len(t))

    w = 1.0/(t['FGOTMAIN']*vmax)
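    # weight each galaxy by 1/(FGOTMAIN * Vmax): Vmax corrects for the flux
    # limit (inverse volume over which the galaxy remains observable) and
    # FGOTMAIN for SDSS spectroscopic completeness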

    mask = mask_1 & disks 
    f_disk_1 = 1.0*np.sum(w[mask])/np.sum(w[mask_1])

    mask = mask_2 & disks 
    f_disk_2 = 1.0*np.sum(w[mask])/np.sum(w[mask_2])

    mask = mask_3 & disks 
    f_disk_3 = 1.0*np.sum(w[mask])/np.sum(w[mask_3])

    mask = mask_4 & disks
    f_disk_4 = 1.0*np.sum(w[mask])/np.sum(w[mask_4])

    mask = mask_5 & disks
    f_disk_5 = 1.0*np.sum(w[mask])/np.sum(w[mask_5])

    mask = mask_6 & disks
    f_disk_6 = 1.0*np.sum(w[mask])/np.sum(w[mask_6])

    print('disk fraction in samples 1-6:')
    print(f_disk_1,f_disk_2,f_disk_3,f_disk_4,f_disk_5,f_disk_6)

    f_disk = np.array([f_disk_1, f_disk_2, f_disk_3, f_disk_4, f_disk_5, f_disk_6])
    bin_centers = [-17.5,-18.5,-19.5,-20.5,-21.5,-22.5]

    #fpath = './data/'
    #fname = 'disk_fraction.dat'
    #ascii.write([bin_centers, f_disk], fpath+fname,
    #            names=['mag', 'f_disk'], overwrite=True)

    # bootstrap error estimate
    Nboot = 1000
    N = len(w)
    inds = np.arange(0, N)

    f = t['FRACPSF'][:, 2]
    m = t['ABSMAG_r0.1']
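
    # bootstrap: draw N indices with replacement, recompute each disk
    # fraction per resample; the scatter across Nboot resamples gives the
    # errors written out below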

    f_disk_1 = np.zeros(Nboot)
    f_disk_2 = np.zeros(Nboot)
    f_disk_3 = np.zeros(Nboot)
    f_disk_4 = np.zeros(Nboot)
    f_disk_5 = np.zeros(Nboot)
    f_disk_6 = np.zeros(Nboot)
    for i in range(Nboot):
        idx = np.random.choice(inds, size=N)
        ww = w[idx]
        ff = f[idx]
        mm = m[idx]
        
        disks = (ff < 0.8)

        mask_1 = (mm > -18) & (mm <= -17)
        mask_2 = (mm > -19) & (mm <= -18)
        mask_3 = (mm > -20) & (mm <= -19)
        mask_4 = (mm > -21) & (mm <= -20)
        mask_5 = (mm > -22) & (mm <= -21)
        mask_6 = (mm > -23) & (mm <= -22)

        f_disk_1[i] = 1.0*np.sum(ww[mask_1 & disks])/np.sum(ww[mask_1])
        f_disk_2[i] = 1.0*np.sum(ww[mask_2 & disks])/np.sum(ww[mask_2])
        f_disk_3[i] = 1.0*np.sum(ww[mask_3 & disks])/np.sum(ww[mask_3])
        f_disk_4[i] = 1.0*np.sum(ww[mask_4 & disks])/np.sum(ww[mask_4])
        f_disk_5[i] = 1.0*np.sum(ww[mask_5 & disks])/np.sum(ww[mask_5])
        f_disk_6[i] = 1.0*np.sum(ww[mask_6 & disks])/np.sum(ww[mask_6])

    y = [np.mean(f_disk_1),np.mean(f_disk_2),np.mean(f_disk_3),np.mean(f_disk_4),np.mean(f_disk_5),np.mean(f_disk_6)]
    err = [np.std(f_disk_1),np.std(f_disk_2),np.std(f_disk_3),np.std(f_disk_4),np.std(f_disk_5),np.std(f_disk_6)]

    fpath = './data/'
    fname = 'disk_fraction.dat'
    ascii.write([bin_centers, y, err], fpath+fname,
                names=['mag', 'f_disk', 'err'], overwrite=True)
def avgmat(base_output, mat1, mat2, lc1, lc2, output_dir="COSMO"):
    import numpy as np
    import matplotlib.pyplot as plt
    import array
    import math
    import os
    from astropy import cosmology as cosmo
    from astropy.cosmology import FlatLambdaCDM
    import re

    cosmo2 = FlatLambdaCDM(H0=70, Om0=0.3)

    list1, z1, mb1, mb1e = np.loadtxt(output_dir + "/lcparam_" + lc1 + ".txt",
                                      usecols=(0, 1, 4, 5),
                                      unpack=True,
                                      dtype="str")
    z1 = z1.astype(float)
    mb1 = mb1.astype(float)
    mb1e = mb1e.astype(float)

    x = cosmo2.luminosity_distance(z1).value
    mu_syn1 = 5.0 * (np.log10(x)) + 25.0 - 19.35
    mu1 = mb1 - mu_syn1

    list2, z2, mb2, mb2e = np.loadtxt(output_dir + "/lcparam_" + lc2 + ".txt",
                                      usecols=(0, 1, 4, 5),
                                      unpack=True,
                                      dtype="str")

    z2 = z1  # using z1 so z lines up
    mb2 = mb2.astype(float)
    mb2e = mb2e.astype(float)

    x = cosmo2.luminosity_distance(z2).value
    mu_syn2 = 5.0 * (np.log10(x)) + 25.0 - 19.35
    mu2 = mb2 - mu_syn2

    mua = (mu1 + mu2) / 2.0
    muae = (mb1e + mb2e) / 2.0
    mua = mu_syn1 + mua
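    # the two Hubble-residual vectors are averaged, then the synthetic
    # magnitude of the reference cosmology is added back so the lcparam
    # file stores apparent-magnitude values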
    print(output_dir + "/lcparam_" + lc1 + ".txt")
    print(output_dir + "/lcparam_" + lc2 + ".txt")
    # stop
    # print z1
    # stop
    f1 = open(output_dir + "/lcparam_" + base_output + ".txt",
              "w")  # this is the file for cosmomc
    f1.write(
        "#name zcmb zhel dz mb dmb x1 dx1 color dcolor 3rdvar d3rdvar cov_m_s cov_m_c cov_s_c set ra dec biascor \n"
    )  # standard format
    for x in range(0, len(z1)):
        f1.write(
            str(list1[x]) + " " + str(z1[x]) + " " + str(z1[x]) + " 0.0 " +
            str(mua[x]) + " " + str(muae[x]) + " 0 0 0 0 0 0 0 0 0 0 0 0\n")
    f1.close()
    print(output_dir + "/sys_" + mat1 + ".txt")
    print(output_dir + "/sys_" + mat2 + ".txt")

    sys1 = np.loadtxt(output_dir + "/sys_" + mat1 + ".txt",
                      unpack=True,
                      dtype="str")
    sys2 = np.loadtxt(output_dir + "/sys_" + mat2 + ".txt",
                      unpack=True,
                      dtype="str")
    scount = sys1[0]
    sys1 = sys1.astype(float)
    sys2 = sys2.astype(float)
    savg = (sys1 + sys2) / 2.0
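    # element-wise average of the two flattened covariance matrices; the
    # first entry (the matrix dimension) is written through unchanged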
    sys3 = open(output_dir + "/sys_" + base_output + ".txt", "w")
    sys3.write(str(scount) + "\n")
    for x in range(1, len(sys1)):
        sys3.write(str(savg[x]) + "\n")
    sys3.close()
    dataset(output_dir, base_output, "", "", sys=1)
    dataset(output_dir, base_output, "_nosys", "", sys=0)
    print(output_dir + "/sys_" + base_output + ".txt")