def compute_date_of_max(options):
    import numpy
    from astropy.table import Table
    import JLA_library as JLA

    params=JLA.build_dictionary(options.config)

    # -----------  Read in the configuration file ------------


    lightCurveFits=JLA.get_full_path(params['lightCurveFits'])
    lightCurves=JLA.get_full_path(params['lightCurves'])
    adjlightCurves=JLA.get_full_path(params['adjLightCurves'])


    # ---------  Read in the list of SNe ---------------------
    SNe = Table.read(lightCurveFits, format='fits')

    nSNe=len(SNe)
    print 'There are %d SNe' % (nSNe)

    # -----------   The lightcurve fitting -------------------

    J=[]

    for SN in SNe:
        SNfile='lc-'+SN['name']+'.list'
        #print 'Examining %s' % SN['name']
        inputFile=lightCurves+SNfile
        outputFile=adjlightCurves+SNfile
        # If needed refit the lightcurve and insert the date of maximum into the input file
        JLA.insertDateOfMax(SN['name'].strip(),inputFile,outputFile,options.force)

    return
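
# A minimal driver sketch (an addition to this listing, not in the original
# source). The option names mirror the attributes the functions above actually
# use (config, force); the parser setup itself is an assumption.
from optparse import OptionParser

if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option('-c', '--config', dest='config',
                      help='Configuration file')
    parser.add_option('-f', '--force', dest='force', action='store_true',
                      default=False, help='Refit even if a date of maximum exists')
    (options, args) = parser.parse_args()
    compute_date_of_max(options)
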
# Example 2
def compute_ZP(options):

    import JLA_library as JLA
    import numpy as np

    params = JLA.build_dictionary(options.config)

    # Read in the standard star

    standard = JLA.spectrum(
        JLA.get_full_path(params['magSys']) + options.standard)

    # Read in the filter

    filt = JLA.filterCurve(
        JLA.get_full_path(params['filterDir']) + options.filter)

    # Compute the ZP
    if options.system == 'AB':
        print '%s in %s %s %5.3f' % (options.standard, options.filter,
                                     options.system, filt.AB(standard))
    else:
        pass
        # print '%s in %s %s %5.3f' % (options.standard, options.filter,
        #                              options.system, filt.Vega(standard))

    return
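
# The zero point printed above comes from filt.AB(standard); presumably this
# follows the usual AB convention, m_AB = -2.5*log10(<f_nu>) - 48.60, with f_nu
# averaged over the bandpass in photon counts. A self-contained sketch of that
# convention (toy filter and spectrum; none of this is JLA_library API):
import numpy as np

def ab_mag(wavelength, f_nu, transmission):
    # Photon-weighted mean of f_nu over the bandpass
    num = np.trapz(transmission * f_nu / wavelength, wavelength)
    den = np.trapz(transmission / wavelength, wavelength)
    return -2.5 * np.log10(num / den) - 48.60

wl = np.linspace(4000., 6000., 201)                  # wavelength in Angstrom
trans = ((wl > 4500.) & (wl < 5500.)).astype(float)  # toy top-hat filter
f_nu = np.full_like(wl, 3.631e-20)                   # 3631 Jy in erg/s/cm^2/Hz
print('%5.3f' % ab_mag(wl, f_nu, trans))             # ~0.000 by construction
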
# Example 4
def add_covar_matrices(covmatrices, diag):
    """
    Python program that adds the individual covariance matrices into a single matrix
    """

    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA

    # Read in the terms that account for uncertainties in peculiar velocities,
    # intrinsic dispersion, and lensing

    # Read in the covariance matrices
    matrices = []
    for matrix in covmatrices:
        matrices.append(fits.getdata(JLA.get_full_path(covmatrices[matrix]),
                                     0))
        # Test for NaNs and replace them with zero
        if numpy.isnan(matrices[-1]).any():
            print 'Found a NaN in %s ... replacing them with zero' % (
                covmatrices[matrix])
            print numpy.isnan(matrices[-1]).sum()
            matrices[-1][numpy.isnan(matrices[-1])] = 0.0

    # Add the matrices
    size = matrices[0].shape
    add = numpy.zeros((size[0], size[0]))
    for matrix in matrices:
        add += matrix

    # Compute A

    nSNe = size[0] // 3

    jla_results = {'Om': 0.303, 'w': -1.027, 'alpha': 0.141, 'beta': 3.102}

    arr = numpy.zeros(nSNe * 3 * nSNe).reshape(nSNe, 3 * nSNe)

    for i in range(nSNe):
        arr[i, 3 * i] = 1.0
        arr[i, 3 * i + 1] = jla_results['alpha']
        arr[i, 3 * i + 2] = -jla_results['beta']

    cov = numpy.matrix(arr) * numpy.matrix(add) * numpy.matrix(arr).T

    # Add the diagonal terms

    sigma = numpy.genfromtxt(JLA.get_full_path(diag),
                             comments='#',
                             usecols=(0, 1, 2),
                             dtype='f8,f8,f8',
                             names=['sigma_coh', 'sigma_lens', 'sigma_pecvel'])

    for i in range(nSNe):
        cov[i, i] += sigma['sigma_coh'][i]**2 + \
        sigma['sigma_lens'][i]**2 + \
        sigma['sigma_pecvel'][i]**2

    return cov
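
# Sanity check (an illustrative addition): for a single SN, the contraction
# A * C * A.T performed above reduces to Var(mb + alpha*x1 - beta*color), the
# variance of the standardised magnitude. With a toy 3x3 covariance:
import numpy as np

alpha, beta = 0.141, 3.102
C = np.array([[0.02, 0.001, -0.002],
              [0.001, 0.50, 0.003],
              [-0.002, 0.003, 0.04]])    # toy (mb, x1, color) covariance
A = np.array([[1.0, alpha, -beta]])
print(A.dot(C).dot(A.T)[0, 0])
# The same number, written out term by term:
print(C[0, 0] + alpha**2 * C[1, 1] + beta**2 * C[2, 2]
      + 2 * alpha * C[0, 1] - 2 * beta * C[0, 2] - 2 * alpha * beta * C[1, 2])
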
# Example 5
def compute_model(options):

    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA
    from astropy.table import Table
    from astropy.cosmology import FlatwCDM
    from scipy.interpolate import interp1d


    # -----------  Read in the configuration file ------------
    params=JLA.build_dictionary(options.config)

    # -----------  Read in the SN ordering ------------------------
    SNeList = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc'])
    nSNe = len(SNeList)

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-', '').replace('.list', '').replace('_smp', '')

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    print 'There are %d SNe' % (nSNe)

    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe = SNe[indices]

    redshift = SNe['zcmb']
    replace=(redshift < 0)

    # For SNe that do not have the CMB redshift
    redshift[replace]=SNe[replace]['zhel']
    print len(redshift)

    if options.raw:
        # Data from the bottom left hand figure of Mosher et al. 2014.
        # This is option ii) described above
        offsets=Table.read(JLA.get_full_path(params['modelOffset']),format='ascii.csv')
        Delta_M=interp1d(offsets['z'], offsets['offset'], kind='linear',bounds_error=False,fill_value='extrapolate')(redshift)
    else:
        Om_0=0.303 # JLA value in the wCDM model
        cosmo1 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=Om_0, w0=-1.0)
        cosmo2 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=Om_0, w0=-1.024)
        Delta_M=5*numpy.log10(cosmo1.luminosity_distance(redshift)/cosmo2.luminosity_distance(redshift))
    
    # Build the covariance matrix. Note that only magnitudes are affected
    Zero=numpy.zeros(nSNe)
    H=numpy.concatenate((Delta_M,Zero,Zero)).reshape(3,nSNe).ravel(order='F')
    C_model=numpy.matrix(H).T * numpy.matrix(H)

    date = JLA.get_date()
    fits.writeto('C_model_%s.fits' % (date),numpy.array(C_model),clobber=True) 

    return None
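
# The matrix written above is the outer product H.T * H, so C_model has rank 1
# and only the magnitude rows are non-zero (the x1 and colour slots of H are
# zero by construction). A quick structural check with toy numbers:
import numpy as np

Delta_M = np.array([0.01, -0.02])   # toy magnitude offsets for 2 SNe
Zero = np.zeros(2)
H = np.concatenate((Delta_M, Zero, Zero)).reshape(3, 2).ravel(order='F')
print(H)                                       # [ 0.01  0.  0.  -0.02  0.  0. ]
print(np.linalg.matrix_rank(np.outer(H, H)))   # 1
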
# Example 7
def compute_dust(options):
    """Python program to compute C_dust
    """

    import numpy
    import astropy.io.fits as fits
    import os
    import JLA_library as JLA

    # ---------- Read in the SNe list -------------------------

    SNelist = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S110',
                               names=['id', 'lc'])

    for i, SN in enumerate(SNelist):
        SNelist['id'][i] = SNelist['id'][i].replace('lc-','').replace('.list','')

    # -----------  Read in the configuration file ------------

    params=JLA.build_dictionary(options.config)
    try:
        salt_path = JLA.get_full_path(params['defsaltModel'])
    except KeyError:
        salt_path = ''
        
    # -----------   The lightcurve fitting -------------------

    # Compute the offset between the nominal value of the extinction
    # and the adjusted value
    # We first compute the difference in light curve fit parameters for E(B-V) * (1+offset)
    offset = 0.1

    j = []

    for SN in SNelist:
        inputFile = SN['lc']
        print 'Fitting %s ' % (SN['lc'])
        workArea = JLA.get_full_path(options.workArea)
        dm, dx1, dc = JLA.compute_extinction_offset(SN['id'], inputFile, offset, workArea, salt_path)
        j.extend([dm, dx1, dc])
    
    # Originally we computed the impact of an offset that is twice as large, hence
    # the factor of 4 in the expression that is commented out below
    # 2017/10/13: we now compute the impact of an offset that is half as large, hence the factor of 4 in the denominator
    # cdust = numpy.matrix(j).T * numpy.matrix(j) * 4.0
    cdust = numpy.matrix(j).T * numpy.matrix(j) / 4.0

    date = JLA.get_date()

    fits.writeto('C_dust_%s.fits' % date, cdust, clobber=True) 

    return
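
# Why the factor of 4: the Jacobian j above was computed for a 10 per cent
# offset in E(B-V). Rescaling to a 5 per cent offset halves each element of j,
# and the outer product then picks up a factor of (1/2)^2 = 1/4. Illustration:
import numpy as np

j = np.array([0.02, -0.01, 0.005])   # toy (dm, dx1, dc) for a 10% offset
C_10 = np.outer(j, j)                # covariance implied by the 10% offset
C_5 = np.outer(j / 2.0, j / 2.0)     # covariance implied by a 5% offset
print(np.allclose(C_5, C_10 / 4.0))  # True
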

# Example 8
def updateDES(options,params,model):
    try:
        shutil.rmtree(options.output+'/'+model['modelNumber']+'/snfit_data/Instruments/DECam')
    except OSError:
        pass
    shutil.copytree(JLA.get_full_path(params['DES_instrument']),options.output+'/'+model['modelNumber']+'/snfit_data/Instruments/DECam')
    
    # Update the DES magnitude system
    shutil.copy(JLA.get_full_path(params['DES_magsys']),options.output+'/'+model['modelNumber']+'/snfit_data/MagSys/')

    return

# Example 9
def convert_lightcurves(options):

    # Read in the configuration file
    # The configuration file contains the location of various files
    params=JLA.build_dictionary(options.config)

    # Read in the extra variance
    # This depends on the photometric method. It is lower for SMP 
    extraVariance=get_extra_variance(JLA.get_full_path(params['extraVariance']),options)

    # Read in the extinction values
    # A temporary fix as the lightcurves do not currently have it
    # Still needed
    extinction=get_extinction(JLA.get_full_path(params['extinction']),options)

    snanaDir=JLA.get_full_path(params['snanaLightCurves'])
    saltDir=JLA.get_full_path(params['adjLightCurves'])

    try:
        os.mkdir(saltDir)
    except OSError:
        pass
        
    saltDir=saltDir+'DES/'

    try:
        os.mkdir(saltDir)
    except OSError:
        pass

    for lightcurve in os.listdir(snanaDir):
        if '.dat' in lightcurve:
            # Read in the snana file
            lc=snanaLightCurve(snanaDir+lightcurve)
            lightCurveFile=saltDir+lightcurve.replace('des_real','lc-DES').replace('.dat','.list')
            if lc.parameters['TYPE'].split()[0] in ['1','101']:   # Is it a SN Ia?
                print lightcurve, lightCurveFile
                lc.clean()                                # Remove bad photometry
                lc.addNoise(extraVariance)                # Add additional variance to the lightcurve points
                # It is not clear if we need to compute a rough date of max before doing the more precise fit
                lc.estimateDateOfMax(options)             # Sets an approximate date of max for the light curve fitting done below.
                # Apply cuts
                # lc.applySNCuts()
                # lc.applySamplingCuts()
                lc.write(lightCurveFile,options.format)   # Write out the result
                lc.fitDateOfMax(lightCurveFile,params)    # Get a more precise estimate of the date of peak brightness
                lc.updateExtinction(lightCurveFile,extinction) # Temporary code
    return
# Example 10
def compute_date_of_max(options):
    import numpy
    from astropy.table import Table
    import JLA_library as JLA

    params=JLA.build_dictionary(options.config)

    # ----------- Correction factor for extinction -----------
    # See ApJ 737 103
    extinctionFactor=0.86

    # -----------  Read in the configuration file ------------

    lightCurveFits=JLA.get_full_path(params['lightCurveFits'])
    lightCurves=JLA.get_full_path(params['lightCurves'])
    adjlightCurves=JLA.get_full_path(params['adjLightCurves'])

    # ---------  Read in the list of SNe ---------------------
    # One can either use an ASCII file with the SN list or a fits file
    if options.SNlist is None:
        SNe = Table.read(lightCurveFits, format='fits')
    else:
        # We use the ASCII file, which gives the full path name
        SNe = Table.read(options.SNlist, format='ascii',names=['name','type','lc'],data_start=0)

    nSNe=len(SNe)
    print 'There are %d SNe' % (nSNe)

    # -----------   The lightcurve fitting -------------------

    for SN in SNe:
        if options.SNlist is None:
            SNfile='lc-'+SN['name']+'.list'
            inputFile=lightCurves+SNfile
            outputFile=adjlightCurves+SNfile
        else:
            inputFile=SN['lc']
            outputFile=SN['lc'].replace(lightCurves,adjlightCurves)

        print 'Examining %s' % SN['name']
        # If needed refit the lightcurve and insert the date of maximum into the input file
        JLA.insertDateOfMax(SN['name'].strip(),inputFile,outputFile,options.force,params)
        # Shouldn't we adjust the extinction first?
        if options.adjustExtinction:
            adjustExtinction(outputFile,extinctionFactor)
    return

# Example 11
def updateKeplercam(options,params,model):
    try:
        shutil.rmtree(options.output+'/'+model['modelNumber']+'/snfit_data/Instruments/Keplercam')
    except OSError:
        pass
    shutil.copytree(JLA.get_full_path(params['KelplerCam_instrument']),options.output+'/'+model['modelNumber']+'/snfit_data/Instruments/Keplercam')

    return
# Example 12
def compute_model(options):

    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA
    from astropy.table import Table
    from astropy.cosmology import FlatwCDM

    # -----------  Read in the configuration file ------------

    params = JLA.build_dictionary(options.config)

    # -----------  Read in the SN ordering ------------------------
    SNeList = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc'])
    nSNe = len(SNeList)

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-',
                                                    '').replace('.list', '')

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    print 'There are %d SNe' % (nSNe)

    #z=numpy.array([])
    #offset=numpy.array([])
    Om_0 = 0.303  # JLA value in the wCDM model

    cosmo1 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=Om_0, w0=-1.0)
    cosmo2 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=Om_0, w0=-1.024)

    # For the JLA SNe
    redshift = SNe['zcmb']
    replace = (redshift < 0)
    # For the non JLA SNe
    redshift[replace] = SNe[replace]['zhel']

    Delta_M = 5 * numpy.log10(
        cosmo1.luminosity_distance(redshift) /
        cosmo2.luminosity_distance(redshift))

    # Build the covariance matrix. Note that only magnitudes are affected
    Zero = numpy.zeros(nSNe)
    H = numpy.concatenate((Delta_M, Zero, Zero)).reshape(3,
                                                         nSNe).ravel(order='F')
    C_model = numpy.matrix(H).T * numpy.matrix(H)

    date = JLA.get_date()
    fits.writeto('C_model_%s.fits' % (date),
                 numpy.array(C_model),
                 clobber=True)

    return None
# Example 13
def compute_Cstat(options):
    """Python program to compute C_stat
    """

    import numpy
    import astropy.io.fits as fits
    from astropy.table import Table
    import JLA_library as JLA

    # -----------  Read in the configuration file ------------

    params=JLA.build_dictionary(options.config)

    # -----------  Read in the SN ordering ------------------------
    SNeList = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc'])
    nSNe = len(SNeList)

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-', '').replace('.list', '')

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')


    # -----------  Read in the data --------------------------

    print 'There are %d SNe in the sample' % (nSNe)

    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe=SNe[indices]

    C_stat=numpy.zeros(9*nSNe*nSNe).reshape(3*nSNe,3*nSNe)

    for i,SN in enumerate(SNe):
        cov=numpy.zeros(9).reshape(3,3)
        cov[0,0]=SN['dmb']**2.
        cov[1,1]=SN['dx1']**2.
        cov[2,2]=SN['dcolor']**2.
        cov[0,1]=SN['cov_m_s']
        cov[0,2]=SN['cov_m_c']
        cov[1,2]=SN['cov_s_c']
        # symmetrise
        cov=cov+cov.T-numpy.diag(cov.diagonal())
        C_stat[i*3:i*3+3,i*3:i*3+3]=cov

    # -----------  Read in the base matrix computed using salt2_stat.cc ------------

    if options.base is not None:
        C_stat+=fits.getdata(options.base)

    date = JLA.get_date()
    fits.writeto('C_stat_%s.fits' % date,C_stat,clobber=True) 

    return
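
# The symmetrise step above works because cov + cov.T double-counts the
# diagonal, which is then subtracted back once. A small demonstration:
import numpy as np

cov = np.array([[1.0, 0.2, 0.3],
                [0.0, 4.0, 0.5],
                [0.0, 0.0, 9.0]])    # only the upper triangle filled, as in the loop above
print(cov + cov.T - np.diag(cov.diagonal()))
# [[ 1.   0.2  0.3]
#  [ 0.2  4.   0.5]
#  [ 0.3  0.5  9. ]]
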
# Example 14
def updateKeplercam(options,params,model):
    try:
        shutil.rmtree(options.output+'/'+model['modelNumber']+'/snfit_data/Instruments/Keplercam')
    except OSError:
        pass
    shutil.copytree(JLA.get_full_path(params['CfA_instrument']),options.output+'/'+model['modelNumber']+'/snfit_data/Instruments/Keplercam')
    
    # The following is not needed
    ##shutil.copy(JLA.get_full_path(params['CfA_magsys']),options.output+'/'+model['modelNumber']+'/snfit_data/MagSys/')
    return
# Example 16
def runSALT(SALTpath, SALTmodel, salt_prefix, inputFile, SN):
    import os
    import JLA_library as JLA

    # Set up the path to the SALT model and the name of the outputFile
    # Note that this relies on a module-level options object for the work area
    os.environ['SALTPATH']=SALTpath+SALTmodel['directory']+'/snfit_data/'
    outputFile=JLA.get_full_path(options.workArea)+'/'+SN+'/'+SN+'_'+SALTmodel['directory']+'.dat'
    if os.path.isfile(outputFile):
        pass
        #print "Skipping, fit with SALT model %s for %s already done" % (SALTmodel['directory'],os.path.split(inputFile)[1])
    else:
        # Otherwise, do the fit with the date of Max set to the value in the lightcurve file
        JLA.fitLC(inputFile, outputFile, salt_prefix, forceDayMax=True)
    return outputFile
# Example 17
def prop_unc(params,filt,spectrum=None):

    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA
    # Use the filterwheel to find the filename of the filter
    filterDir=params['DES_instrument']
    filterWheel=JLA.get_full_path(filterDir)+'/Filterwheel'
    filterNames=numpy.genfromtxt(filterWheel,comments='#',usecols=(0,1),dtype='S1,S30',
                                 names=['filterName','filename'])
    filter_filename=filterNames[filterNames['filterName']==filt['filter'][-1:]]['filename'][0]

    # Read in the filter curve
    filterCurve=JLA.filterCurve(JLA.get_full_path(filterDir)+'/'+filter_filename)

    # Set the size of the filter offset
    offset=filt['wavelength']*10.  

    # We compute a number of integrals: first with the filter curves as is, then with an offset added to the filter curve
    # i) The I0 integral
    error_I0=2.5 * numpy.log10(filterCurve.I0(0.0)/filterCurve.I0(offset))
    # ii) The chromatic correction.
    # Assumed to be zero for now
    # If the standard filter transmission curves are shifted by 5nm, then all the filters will be out by that same amount
    # This may mean that the offset is quite small
    #mean_wavelength=filterCurve.mean()
    #I10_std=filterCurve.I1(mean_wavelength,0.0) / filterCurve.I0(0.0)
    #I10_std_offset=filterCurve.I1(mean_wavelength,10.0) / filterCurve.I0(10.0)

    error_chromatic=0.0

    # iii) The error in the AB offset
    # We use the standard filter curve to compute the AB offset
    if spectrum is None:
        calspec=JLA.spectrum(fits.getdata(JLA.get_full_path(params['calspec']),1),'CALSPEC')
    else:
        calspec=JLA.spectrum(fits.getdata(JLA.get_full_path(spectrum),1),'CALSPEC')

    error_AB=filterCurve.AB(calspec)-filterCurve.AB(calspec,offset)
    return error_I0,error_chromatic,error_AB
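
# A sketch of what the I0 term above measures: shifting the bandpass changes
# the integral over the filter curve, and 2.5*log10 of the ratio is the
# resulting zero-point error in magnitudes. This assumes a photon-counting
# definition I0 = int(T(lambda) * lambda * dlambda); the actual JLA_library
# definition may differ in detail.
import numpy as np

def I0(wavelength, transmission, shift=0.0):
    # Integral of the bandpass shifted redward by `shift` Angstrom
    shifted = np.interp(wavelength, wavelength + shift, transmission)
    return np.trapz(shifted * wavelength, wavelength)

wl = np.linspace(4000., 6000., 2001)
trans = np.maximum(1.0 - np.abs(wl - 5000.) / 500., 0.0)   # triangular bandpass
print('%6.4f mag' % (2.5 * np.log10(I0(wl, trans) / I0(wl, trans, shift=50.0))))
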
# Example 19
def compute_filterTransVar(options):
    import JLA_library as JLA
    import numpy
    from astropy.table import Table

    eff=[]
    # Read in the filter curves
    filt=Table.read(JLA.get_full_path(options.filter),format='ascii.csv')
    for col in filt.colnames:
        if 'ccd' in col and 'amp' not in col:
            f=JLA.filterCurve(filt['wavelength'],filt[col])
            eff.append(f.eff())

    print 'Examined the transmission curves for %d CCDs' % (len(eff))
    print 'Mean effective wavelength is %6.1f' % (numpy.mean(eff))
    print 'Range of effective wavelength is %6.1f-%6.1f' % (numpy.min(eff),numpy.max(eff))
    print 'RMS effective wavelength %6.1f' % (numpy.std(eff))

    return
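
# f.eff() above presumably returns the transmission-weighted mean wavelength,
# lambda_eff = int(lambda * T dlambda) / int(T dlambda). Standalone sketch
# (the Gaussian bandpass is purely illustrative):
import numpy as np

def effective_wavelength(wavelength, transmission):
    return (np.trapz(wavelength * transmission, wavelength) /
            np.trapz(transmission, wavelength))

wl = np.linspace(4000., 6000., 1001)
trans = np.exp(-0.5 * ((wl - 5100.) / 300.)**2)
print('%6.1f' % effective_wavelength(wl, trans))   # ~5100.0
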
# Example 20
def fitDateOfMax(self,lightCurveFile,params):
    # A method of the snanaLightCurve class (see convert_lightcurves above)
    # A full salt2 fit
    outputFile=lightCurveFile.replace('.list','.res')
    os.environ['SALTPATH']=JLA.get_full_path(params['defsaltModel'])
    JLA.fitLC(lightCurveFile, outputFile, salt_prefix='')
    self.dateofMax,self.dateofMaxError=JLA.getDateOfMax(outputFile)
    # Remove the old date of max and insert the new one
    lc=open(lightCurveFile)
    lc_lines=lc.readlines()
    lc.close()
    lc=open(lightCurveFile,'w')
    lc.write('@DayMax %s %s\n' % (self.dateofMax,self.dateofMaxError))
    for line in lc_lines:
        if 'DayMax' in line:
            pass
        else:
            lc.write(line)
    lc.close()
    return
# Example 21
def compute_bias(options):

    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA
    from astropy.table import Table
    from astropy.cosmology import FlatwCDM
    from scipy.optimize import leastsq
    import matplotlib.pyplot as plt
    from scipy.stats import t


    # -----------  Read in the configuration file ------------

    params=JLA.build_dictionary(options.config)

    # -----------  Read in the SN ordering ------------------------
    SNeList = Table(numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc']))
    nSNe = len(SNeList)

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-', '').replace('.list', '')
        
    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    print 'There are %d SNe' % (nSNe)

    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe=SNe[indices]
    # Add a column that records the error in the bias
    SNe['e_bias'] = numpy.zeros(nSNe,'f8')


    # Read in the points from the B14 figure
    # Fit a polynomial to the data
    # Determine the uncertainties

    bias = numpy.genfromtxt(JLA.get_full_path(params['biasPolynomial']),
                                  skip_header=3,
                                  usecols=(0, 1, 2, 3),
                                  dtype='S10,f8,f8,f8',
                                  names=['sample', 'redshift', 'bias', 'e_bias'])

    
    if options.plot:
        fig=plt.figure()
        ax=fig.add_subplot(111)
        colour={'nearby':'b','SNLS':'r','SDSS':'g'}

    for sample in numpy.unique(bias['sample']):
        selection=(bias['sample']==sample)
        guess=[0,0,0]
        
        # residuals and poly are helper functions defined elsewhere in the source,
        # presumably the weighted-residual function passed to leastsq and a polynomial evaluator
        plsq=leastsq(residuals, guess, args=(bias[selection]['bias'],
                                             bias[selection]['redshift'],
                                             bias[selection]['e_bias'],
                                             'poly'), full_output=1)

        if plsq[4] in [1,2,3,4]:
            print 'Solution for %s found' % (sample)

        if options.plot:
            ax.errorbar(bias[selection]['redshift'],
                    bias[selection]['bias'],
                    yerr=bias[selection]['e_bias'],
                    ecolor='k',
                    color=colour[sample],
                    fmt='o',
                    label=sample)
            z=numpy.arange(numpy.min(bias[selection]['redshift']),numpy.max(bias[selection]['redshift']),0.001)
            ax.plot(z,poly(z,plsq[0]),color=colour[sample])

        # For each SN, determine the uncertainty in the correction. We use the covariance matrix
        # to compute prediction bounds for the fitted curve.
        # https://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html
        
        # Compute the chi-sq.
        chisq=(((bias[selection]['bias']-poly(bias[selection]['redshift'],plsq[0]))/bias[selection]['e_bias'])**2.).sum()
        dof=selection.sum()-len(guess)
        print "Reduced chi-square value for sample %s is %5.2e" % (sample, chisq / dof)

        alpha=0.315 # Confidence interval is 100 * (1-alpha)
        # Compute the upper alpha/2 value for the Student t distribution with dof
        thresh=t.ppf((1-alpha/2.0), dof)
        
        if options.plot:
            # The following is only valid for polynomial fitting functions
            upper_curve=[]
            lower_curve=[]
            for x in z:
                vect=numpy.matrix([1,x,x**2.])
                offset=thresh * numpy.sqrt(chisq / dof * (vect*numpy.matrix(plsq[1])*vect.T)[0,0])
                upper_curve.append(poly(x,plsq[0])+offset)
                lower_curve.append(poly(x,plsq[0])-offset)

            ax.plot(z,lower_curve,'--',color=colour[sample])
            ax.plot(z,upper_curve,'--',color=colour[sample])

        # Compute the error in the bias
        # We increase the absolute value
        # In other words, if the bias is negative, we subtract the error to make it even more negative
        # We assume 100% correlation between SNe
        for i,SN in enumerate(SNe):
            if JLA.survey(SN) == sample:
                if SN['zcmb'] > 0:
                    redshift = SN['zcmb']
                else:
                    redshift = SN['zhel']
                vect = numpy.matrix([1,redshift,redshift**2.])
                if poly(redshift,plsq[0]) > 0:
                    sign = 1
                else:
                    sign = -1

                SNe['e_bias'][i] = sign * thresh * numpy.sqrt(chisq / dof * (vect*numpy.matrix(plsq[1])*vect.T)[0,0])
                # We are getting some unrealistically large values

    if options.plot:
        ax.legend()
        plt.show()
        plt.close()

    # Compute the bias matrix

    date = JLA.get_date()
    Zero=numpy.zeros(nSNe)
    H=numpy.concatenate((SNe['e_bias'],Zero,Zero)).reshape(3,nSNe).ravel(order='F')

    C_bias = numpy.matrix(H)

    fits.writeto('C_bias_%s.fits' % (date),C_bias.T*C_bias,clobber=True) 

    return None
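
# The band used above is the standard prediction bound for a polynomial fit:
# with the leastsq covariance C and design vector v = (1, z, z^2), the
# half-width at z is t_(1-alpha/2, dof) * sqrt(chisq/dof * v C v.T).
# Minimal standalone sketch of the same recipe on toy data:
import numpy as np
from scipy.optimize import leastsq
from scipy.stats import t

x = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
y = np.array([0.010, 0.015, 0.020, 0.033, 0.050, 0.075])
err = np.full_like(x, 0.005)

def poly_resid(p, y, x, err):
    return (y - (p[0] + p[1] * x + p[2] * x**2)) / err

coeffs, C, info, mesg, ier = leastsq(poly_resid, [0., 0., 0.],
                                     args=(y, x, err), full_output=1)
chisq = (poly_resid(coeffs, y, x, err)**2).sum()
dof = len(x) - len(coeffs)
thresh = t.ppf(1 - 0.315 / 2.0, dof)
v = np.array([1.0, 0.35, 0.35**2])
print('half-width at z=0.35: %.4f' % (thresh * np.sqrt(chisq / dof * v.dot(C).dot(v))))
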
# Example 22
def create_Models(options):
    import os
    import shutil
    import JLA_library as JLA
    from astropy.table import Table

    params=JLA.build_dictionary(options.config)

    try:
        os.mkdir(options.output)
    except OSError:
        print "Directory %s already exists" % (options.output)

    # Read in the SALT models that will be kept
    SALTmodels=Table.read(options.modelList,format='ascii',names=['ID','Description'],data_start=0)

    # Read in the models for which the magnitude will be adjusted
    try:
        magOffsets=Table.read(options.magOffsetList,format='ascii',names=['Model','Filter','Offset','MagSys'],data_start=1,delimiter='\t',comment='#')
    except Exception:
        magOffsets=[]

    modelList=[]
    for model in os.listdir(JLA.get_full_path(options.base)):
        if model in SALTmodels['ID']:
            print "Copying across %s" % model
            modelList.append(model)
            shutil.copytree(options.base+'/'+model,options.output+'/'+model)
            # Copy salt2 directory to salt2-4
            shutil.copytree(options.output+'/'+model+'/snfit_data/salt2',options.output+'/'+model+'/snfit_data/salt2-4')
            # Update fitmodel.card
            shutil.copy(JLA.get_full_path(params['fitmodel']),options.output+'/'+model+'/snfit_data/fitmodel.card')
            # Add the DECam instrument files
            shutil.copytree(JLA.get_full_path(params['DES_instrument']),options.output+'/'+model+'/snfit_data/Instruments/DECam')
            # Update the Keplercam instrument files
            shutil.rmtree(options.output+'/'+model+'/snfit_data/Instruments/Keplercam')
            shutil.copytree(JLA.get_full_path(params['CfA_instrument']),options.output+'/'+model+'/snfit_data/Instruments/Keplercam')
            ## Serious bug - filter and ZP offsets are lost here!
            # Add DES magnitude system
            shutil.copy(JLA.get_full_path(params['DES_magsys']),options.output+'/'+model+'/snfit_data/MagSys/')
            # Update the CfA magnitude system
            shutil.copy(JLA.get_full_path(params['CfA_magsys']),options.output+'/'+model+'/snfit_data/MagSys/')
            # This is not needed for CSP as the instrument files and magnitude system have not changed since JLA
        else:
            print "Excluding %s" % model


    print 'We start with %d models from JLA' % (len(modelList))

    # ---------  Add new models --------------

    newModels=Table.read(options.add,format='ascii', comment='#')
    for model in newModels:
        # Copy across the base model
        shutil.copytree(JLA.get_full_path(model['baseModel']),options.output+'/'+model['modelNumber'])
        print 'Creating %s' % (model['modelNumber'])

        # Copy salt2 directory to salt2-4
        shutil.copytree(options.output+'/'+model['modelNumber']+'/snfit_data/salt2',options.output+'/'+model['modelNumber']+'/snfit_data/salt2-4')

        # Remove the old base instrument, if it exists and replace it with a new one
        try:
            shutil.rmtree(options.output+'/'+model['modelNumber']+'/snfit_data/'+model['fitmodel'])
        except OSError:
            pass

        shutil.copytree(JLA.get_full_path(model['baseInstrument']+model['fitmodel']),options.output+'/'+model['modelNumber']+'/snfit_data/'+model['fitmodel'])

        # Remove the old MagSys directory and replace it with the new one
        shutil.rmtree(options.output+'/'+model['modelNumber']+'/snfit_data/MagSys')
        shutil.copytree(JLA.get_full_path(model['baseInstrument'])+'MagSys',options.output+'/'+model['modelNumber']+'/snfit_data/MagSys')
        
        # Replace the fitmodel.card it with the new one
        shutil.copy(JLA.get_full_path(model['baseInstrument'])+'fitmodel.card',options.output+'/'+model['modelNumber']+'/snfit_data/fitmodel.card')

        # Modify filter curve and ZP
        if model['Type']=='filt':
            offsetFilter(options.output+'/'+model['modelNumber']+'/snfit_data/'+model['fitmodel']+'/'+model['Filter'],model['Instrument'])
        else:
            offsetZP(options.output+'/'+model['modelNumber']+'/snfit_data/MagSys/'+model['MagSys'],model['ShortName'],model['Instrument'],model['fitmodel'])

        # We now update the list of instruments in the newly created surfaces
        # We should try to generalise this, as this will become very complex as more instruments are added.
        if model['Instrument']=='DECAM':
            # Update just the Keplercam instrument files
            updateKeplercam(options,params,model)
        elif model['Instrument']=='KEPLERCAM':
            # Update just the DES instrument files
            updateDES(options,params,model)
        else: # The case for Swope ...
            # Update both the Keplercam and DES files
            updateDES(options,params,model)
            updateKeplercam(options,params,model)


        modelList.append(model['modelNumber'])

    # ---- Update magnitude ZPs -----
    #for model in magOffsets:
    #    if numpy.abs(model['Offset']) > 0:
    #        magSys=options.output+'/'+model['Model']+'/snfit_data/MagSys/'+ model['MagSys']
    #        offsetZP2(magSys,model['Offset'],model['Filter'])


    print 'We now have %d models' % (len(modelList))
    
    # ---- Copy across the saltModels.list ----

    shutil.copy(options.modelList,options.output+'/saltModels.list')
    

    return
# Example 23
def merge_lightcurve_fits(options):
    """Pythom program to merge the lightcurve fit results into a sigle format"""
    import numpy
    import astropy
    import JLA_library as JLA
    from astropy.table import Table, MaskedColumn, vstack

    params = JLA.build_dictionary(options.config)

    # ---------------- JLA ------------------------
    lightCurveFits = JLA.get_full_path(params['JLAlightCurveFits'])
    f = open(lightCurveFits)
    header = f.readlines()
    f.close()
    names = header[0].strip('#').split()

    # I imagine that the tables package in astropy could also be used to read the ascii input file
    SNeSpec = Table(
        numpy.genfromtxt(
            lightCurveFits,
            skip_header=1,
            dtype=
            'S12,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8',
            names=names))

    nSNeSpec = len(SNeSpec)
    print 'There are %d SNe from the spectroscopically confirmed sample' % (
        nSNeSpec)

    # Add an extra column to the table
    SNeSpec['source'] = ['JLA'] * nSNeSpec

    # ---------------- Shuvo's sample ------------------------
    # Photometrically identified SNe in Shuvo's sample, if the parameter exists
    if params['photLightCurveFits'] != 'None':
        lightCurveFits = JLA.get_full_path(params['photLightCurveFits'])
        SNePhot = Table.read(lightCurveFits, format='fits')
        nSNePhot = len(SNePhot)

        print 'There are %d SNe from the photometric sample' % (nSNePhot)

        # Converting from Shuvo's names to those used by JLA
        conversion = {
            'name': 'name_adj',
            'zcmb': None,
            'zhel': 'z',
            'dz': None,
            'mb': 'mb',
            'dmb': 'emb',
            'x1': 'x1',
            'dx1': 'ex1',
            'color': 'c',
            'dcolor': 'ec',
            '3rdvar': 'col27',
            'd3rdvar': 'd3rdvar',
            'tmax': None,
            'dtmax': None,
            'cov_m_s': 'cov_m_x1',
            'cov_m_c': 'cov_m_c',
            'cov_s_c': 'cov_x1_c',
            'set': None,
            'ra': None,
            'dec': None,
            'biascor': None
        }

        # Add the uncertainty in the mass column
        SNePhot['d3rdvar'] = (SNePhot['col29'] +
                              SNePhot['col28']) / 2. - SNePhot['col27']

        # Remove columns that are not listed in conversion

        for colname in SNePhot.colnames:
            if colname not in conversion.values():
                SNePhot.remove_column(colname)

        for key in conversion.keys():
            # Rename the column if it does not already exist
            if conversion[key] is not None and conversion[key] != key:
                SNePhot.rename_column(conversion[key], key)
            elif conversion[key] is None:
                # Create it, mask it, and fill all values
                SNePhot[key] = MaskedColumn(numpy.zeros(nSNePhot),
                                            mask=numpy.ones(nSNePhot, bool))
                SNePhot[key].fill_value = -99  # does not work as expected, so we set it explicitly in the next line
                SNePhot[key] = -99.9
            else:
                # Do nothing if the column already exists
                pass

        # Add the source column
        SNePhot['source'] = "Phot_Uddin"

    # ----------------------  CfA4 ----------------------------------
    if params['CfA4LightCurveFits'] != 'None':
        lightCurveFits = JLA.get_full_path(params['CfA4LightCurveFits'])
        f = open(lightCurveFits)
        header = f.readlines()
        f.close()
        names = header[0].strip('#').split(',')

        SNeCfA4 = Table(
            numpy.genfromtxt(lightCurveFits,
                             skip_header=1,
                             dtype='S12,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8',
                             names=names,
                             delimiter=','))

        nSNeCfA4 = len(SNeCfA4)

        conversion = {
            'name': 'name',
            'zcmb': None,
            'zhel': 'z',
            'dz': None,
            'mb': 'mb',
            'dmb': 'emb',
            'x1': 'x1',
            'dx1': 'ex1',
            'color': 'c',
            'dcolor': 'ec',
            '3rdvar': None,
            'd3rdvar': None,
            'tmax': None,
            'dtmax': None,
            'cov_m_s': 'cov_m_x1',
            'cov_m_c': 'cov_m_c',
            'cov_s_c': 'cov_x1_c',
            'set': None,
            'ra': None,
            'dec': None,
            'biascor': None
        }

        # Remove columns that are not listed in conversion

        for colname in SNeCfA4.colnames:
            if colname not in conversion.values():
                SNeCfA4.remove_column(colname)

        for key in conversion.keys():
            # Rename the column if it does not already exist
            if conversion[key] is not None and conversion[key] != key:
                SNeCfA4.rename_column(conversion[key], key)
            elif conversion[key] is None:
                # Create it, mask it, and fill all values
                SNeCfA4[key] = MaskedColumn(numpy.zeros(nSNeCfA4),
                                            mask=numpy.ones(nSNeCfA4, bool))
                SNeCfA4[key].fill_value = -99  # does not work as expected, so we set it explicitly in the next line
                SNeCfA4[key] = -99.9
            else:
                # Do nothing if the column already exists
                pass

        # Add the source column
        SNeCfA4['source'] = "CfA4"

    try:
        SNe = vstack([SNeSpec, SNePhot, SNeCfA4])
    except NameError:
        # SNePhot and/or SNeCfA4 were not read in
        SNe = SNeSpec

    # Write out the result as a FITS table
    date = JLA.get_date()
    SNe.write('%s_%s.fits' % (options.output, date), format='fits')

    return
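
# Condensed sketch of the conversion-dictionary pattern used above: source
# columns are renamed onto the JLA schema, and fields the source lacks become
# masked placeholder columns holding a sentinel value (toy table below):
import numpy as np
from astropy.table import Table, MaskedColumn

src = Table({'name_adj': ['SN1'], 'z': [0.1], 'emb': [0.05]})
conversion = {'name': 'name_adj', 'zhel': 'z', 'dmb': 'emb', 'zcmb': None}
for key, old in conversion.items():
    if old is not None and old != key:
        src.rename_column(old, key)
    elif old is None:
        src[key] = MaskedColumn(np.zeros(len(src)), mask=np.ones(len(src), bool))
        src[key] = -99.9          # sentinel, set explicitly as in the code above
print(src)
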
# Example 24
def compute_Ccal(options):
    """Python program to compute Ccal
    """

    import os
    import sys
    import numpy
    import astropy.io.fits as fits
    from astropy.table import Table
    import multiprocessing as mp
    import matplotlib.pyplot as plt
    import JLA_library as JLA

    # -----------  Read in the configuration file ------------

    params=JLA.build_dictionary(options.config)
    try:
        salt_prefix = params['saltPrefix']
    except KeyError:
        salt_prefix = ''

    # ---------- Read in the SNe list -------------------------

    SNeList = Table(numpy.genfromtxt(options.SNlist,
                                     usecols=(0, 2),
                                     dtype='S30,S100',
                                     names=['id', 'lc']))


    for i,SN in enumerate(SNeList):
        SNeList['id'][i]=SNeList['id'][i].replace('lc-', '').replace('.list', '').replace('_smp', '')

    # ----------  Read in the SN light curve fits ------------
    # This is used to get the SN redshifts which are used in smoothing the Jacobian

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    # Make sure that the order is correct
    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe = SNe[indices]
    if len(indices) != len(SNeList['id']):
        print "We are missing SNe"
        sys.exit(1)

    # -----------  Set up the structures to handle the different salt models -------
    # The first model is the unperturbed salt model
    SALTpath=JLA.get_full_path(params['saltPath'])

    SALTmodels=JLA.SALTmodels(SALTpath+'/saltModels.list')
    nSALTmodels=len(SALTmodels)-1
    print SALTmodels, nSALTmodels

    nSNe=len(SNeList)
    print 'There are %d SNe in the sample' % (nSNe)
    print 'There are %d SALT models' % (nSALTmodels)

    # Add a survey column, which we use with the smoothing, and the redshift
    SNeList['survey'] = numpy.zeros(nSNe,'a10')
    SNeList['z'] = SNe['zhel']

    # Identify the SNLS, SDSS, HST and low-z SNe. We use this when smoothing the Jacobian
    # This is rather inelegant
    # We still need to allow for Vanina's naming convention when doing this for the photometric sample
    for i,SN in enumerate(SNeList):
        if SN['id'][0:4]=='SDSS':
            SNeList['survey'][i]='SDSS'
        elif SN['id'][2:4] in ['D1','D2','D3','D4']:
            SNeList['survey'][i]='SNLS'
        elif SN['id'][0:3] in ['DES']:
            SNeList['survey'][i]='DES'
        elif SN['id'][0:2]=='sn':
            SNeList['survey'][i]='nearby'
        else:
            SNeList['survey'][i]='high-z'

    # -----------   Read in the calibration matrix -----------------
    Cal=fits.getdata(JLA.get_full_path(params['C_kappa']))

    # Multiply the ZP submatrix by 100^2, and the two ZP-offset submatrices by 100,
    # because the magnitude offsets are 0.01 mag and the units of the covariance matrix are mag
    size=Cal.shape[0] // 2
    Cal[0:size,0:size]=Cal[0:size,0:size]*10000.
    Cal[0:size,size:]=Cal[0:size,size:]*100.
    Cal[size:,0:size]=Cal[size:,0:size]*100.


    # ------------- Create an area to work in -----------------------
    workArea = JLA.get_full_path(options.workArea)
    try:
        os.mkdir(workArea)
    except OSError:
        pass

    # -----------   The lightcurve fitting --------------------------

    firstSN=True
    
    log=open('log.txt','w')

    for i,SN in enumerate(SNeList):
        J=[]
        try:
            os.mkdir(workArea+'/'+SN['id'])
        except OSError:
            pass

        #firstModel=True
        print 'Examining SN #%d %s' % (i+1,SN['id'])

        # Set up the number of processes
        pool = mp.Pool(processes=int(options.processes))
        # runSALT is the program that does the lightcurve fitting
        # Note that pool.apply blocks until each fit completes, so the fits run
        # serially; pool.apply_async would be needed to run them in parallel
        results = [pool.apply(runSALT, args=(SALTpath,
                                             SALTmodel,
                                             salt_prefix,
                                             SN['lc'],
                                             SN['id'])) for SALTmodel in SALTmodels]
        for result in results[1:]:
            # The first model is the unperturbed model
            dM,dX,dC=JLA.computeOffsets(results[0],result)
            J.extend([dM,dX,dC])
        pool.close() # This prevents too many open files

        if firstSN:
            J_new=numpy.array(J).reshape(nSALTmodels,3).T
            firstSN=False
        else:
            J_new=numpy.concatenate((J_new,numpy.array(J).reshape(nSALTmodels,3).T),axis=0)

        log.write('%d rows %d columns\n' % (J_new.shape[0],J_new.shape[1]))

    log.close()

    # Compute the new covariance matrix J . Cal . J.T produces a 3 * n_SN by 3 * n_SN matrix
    # J=jacobian

    J_smoothed=numpy.array(J_new)*0.0
    J=J_new

    # We need to concatenate the different samples ...
    
    if options.Plot:
        try:
            os.mkdir('figures')
        except OSError:
            pass               

    nPoints={'SNLS':11,'SDSS':11,'nearby':11,'high-z':11,'DES':11} 
    #sampleList=['nearby','DES']
    sampleList=params['smoothList'].split(',')
    if options.smoothed:
        # We smooth the Jacobian
        # We roughly follow the method described in the footnote of p13 of B14
        for sample in sampleList:
            selection=(SNeList['survey']==sample)
            J_sample=J[numpy.repeat(selection,3)]

            for sys in range(nSALTmodels):
                # We need to convert to a numpy array
                # There is probably a better way
                redshifts=numpy.array([z for z in SNeList[selection]['z']])
                derivatives_mag=J_sample[0::3][:,sys]  # [0::3] = [0,3,6 ...] Every 3rd one
                #print redshifts.shape, derivatives_mag.shape, nPoints[sample]
                forPlotting_mag,res_mag=JLA.smooth(redshifts,derivatives_mag,nPoints[sample])
                derivatives_x1=J_sample[1::3][:,sys]
                forPlotting_x1,res_x1=JLA.smooth(redshifts,derivatives_x1,nPoints[sample])
                derivatives_c=J_sample[2::3][:,sys]
                forPlotting_c,res_c=JLA.smooth(redshifts,derivatives_c,nPoints[sample])

                # We need to insert the new results into the smoothed Jacobian matrix in the correct place
                # The Jacobian is a 3 * n_SN by nSALTmodels matrix
                # The rows are ordered by the mag, stretch and colour of each SN.
                J_smoothed[numpy.repeat(selection,3),sys]=numpy.concatenate([res_mag,res_x1,res_c]).reshape(3,selection.sum()).ravel('F')

                # If required, make some plots as a way of checking 

                if options.Plot:
                    print 'Creating plot for systematic %d and sample %s' % (sys, sample) 
                    fig=plt.figure()
                    ax1=fig.add_subplot(311)
                    ax2=fig.add_subplot(312)
                    ax3=fig.add_subplot(313)
                    ax1.plot(redshifts,derivatives_mag,'bo')
                    ax1.plot(forPlotting_mag[0],forPlotting_mag[1],'r-')
                    ax1.set_ylabel('mag')
                    ax2.plot(redshifts,derivatives_x1,'bo')
                    ax2.plot(forPlotting_x1[0],forPlotting_x1[1],'r-')
                    ax2.set_ylabel('x1')
                    ax3.plot(redshifts,derivatives_c,'bo')
                    ax3.plot(forPlotting_c[0],forPlotting_c[1],'r-')
                    ax3.set_ylabel('c')
                    ax3.set_xlabel('z')
        
                    plt.savefig('figures/%s_sys_%d.png' % (sample,sys))
                    plt.close()

    date=JLA.get_date()


    fits.writeto('J_%s.fits' % (date) ,J,clobber=True) 
    fits.writeto('J_smoothed_%s.fits' % (date), J_smoothed,clobber=True) 

    # Some matrix arithmetic
    # C_cal = J . Cal . J.T is a 3 * n_SN by 3 * n_SN matrix

    # Read in a smoothed Jacobian specified in the options
    if options.jacobian is not None:
        J_smoothed=fits.getdata(options.jacobian)
#    else:
#        # Replace the NaNs in your smoothed Jacobian with zero
#        J_smoothed[numpy.isnan(J_smoothed)]=0

    C=numpy.matrix(J_smoothed)*numpy.matrix(Cal)*numpy.matrix(J_smoothed).T
    if options.output is None:
        fits.writeto('C_cal_%s.fits' % (date), numpy.array(C), clobber=True) 
    else:
        fits.writeto('%s.fits' % (options.output),numpy.array(C),clobber=True)

    return
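
# Dimension bookkeeping for the product above: with n SNe and m perturbed SALT
# surfaces, J is (3n x m) and Cal is (m x m), so J * Cal * J.T is (3n x 3n).
# Toy check:
import numpy as np

n, m = 4, 5
J = np.random.randn(3 * n, m)
Cal = np.eye(m) * 0.01**2          # uncorrelated 0.01 mag calibration offsets
print(J.dot(Cal).dot(J.T).shape)   # (12, 12)
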
# Example 25
    )

    parser.add_option("-s", "--SNlist", dest="SNlist", help="List of SNe")

    parser.add_option(
        "-l",
        "--lcfits",
        dest="lcfits",
        default="lightCurveFits",
        help="Key in config file pointing to lightcurve fit parameters")

    (options, args) = parser.parse_args()

    params = JLA.build_dictionary(options.config)

    lcfile = JLA.get_full_path(params[options.lcfits])
    SN_data = Table.read(lcfile, format='fits')

    SN_list_long = np.genfromtxt(options.SNlist, usecols=(0), dtype='S30')
    SN_list = [
        name.replace('lc-', '').replace('.list', '') for name in SN_list_long
    ]

    SN_indices = JLA.reindex_SNe(SN_list, SN_data)
    SN_data = SN_data[SN_indices]

    host_correction = HostCorrection()

    C_host = host_correction.covmat_host(SN_data)

    date = JLA.get_date()
# Example 26
def compute_bias(options):

    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA
    from astropy.table import Table
    from astropy.cosmology import FlatwCDM
    from scipy.optimize import leastsq
    import matplotlib.pyplot as plt
    from scipy.stats import t


    # -----------  Read in the configuration file ------------
    params=JLA.build_dictionary(options.config)

    # -----------  Read in the SN ordering ------------------------
    SNeList = Table(numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc']))
    nSNe = len(SNeList)

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-', '').replace('.list', '').replace('_smp','')

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')
    print 'There are %d SNe' % (nSNe)

    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe=SNe[indices]

    # Add a column that records the error in the bias correction
    SNe['e_bias'] = numpy.zeros(nSNe,'f8')

    # Read in the bias correction (see, for example, Fig.5 in B14)
    # Fit a polynomial to the data
    # Determine the uncertainties

    bias = numpy.genfromtxt(JLA.get_full_path(params['biasPolynomial']),
                                  skip_header=4,
                                  usecols=(0, 1, 2, 3),
                                  dtype='S10,f8,f8,f8',
                                  names=['sample', 'redshift', 'bias', 'e_bias'])

    if options.plot:
        fig=plt.figure()
        ax=fig.add_subplot(111)
        colour={'nearby':'b','SNLS':'r','SDSS':'g','DES':'k'}

    for sample in numpy.unique(bias['sample']):
        selection=(bias['sample']==sample)
        guess=[0,0,0]

        print bias[selection]
        plsq=leastsq(residuals, guess, args=(bias[selection]['bias'],
                                             bias[selection]['redshift'],
                                             bias[selection]['e_bias'],
                                             'poly'), full_output=1)

        if plsq[4] in [1,2,3,4]:
            print 'Solution for %s found' % (sample)

        if options.plot:
            ax.errorbar(bias[selection]['redshift'],
                    bias[selection]['bias'],
                    yerr=bias[selection]['e_bias'],
                    ecolor='k',
                    color=colour[sample],
                    fmt='o',
                    label=sample)
            z=numpy.arange(numpy.min(bias[selection]['redshift']),numpy.max(bias[selection]['redshift']),0.001)
            ax.plot(z,poly(z,plsq[0]),color=colour[sample])

        # For each SN, determine the uncertainty in the correction. We use the approach described in
        # https://www.astro.rug.nl/software/kapteyn/kmpfittutorial.html
        
        # Compute the chi-sq.
        chisq=(((bias[selection]['bias']-poly(bias[selection]['redshift'],plsq[0]))/bias[selection]['e_bias'])**2.).sum()
        dof=selection.sum()-len(guess)
        print "Reduced chi-square value for sample %s is %5.2e" % (sample, chisq / dof)

        alpha=0.315 # Confidence interval is 100 * (1-alpha)
        # Compute the upper alpha/2 value for the student t distribution with dof
        thresh=t.ppf((1-alpha/2.0), dof)
        
        if options.plot and sample!='nearby':
            # The following is only valid for polynomial fitting functions, and we do not compute it for the nearby sample
            upper_curve=[]
            lower_curve=[]
            for x in z:
                vect=numpy.matrix([1,x,x**2.])
                offset=thresh * numpy.sqrt(chisq / dof * (vect*numpy.matrix(plsq[1])*vect.T)[0,0])
                upper_curve.append(poly(x,plsq[0])+offset)
                lower_curve.append(poly(x,plsq[0])-offset)

            ax.plot(z,lower_curve,'--',color=colour[sample])
            ax.plot(z,upper_curve,'--',color=colour[sample])

        # Compute the error in the bias
        # We increase the absolute value
        # In other words, if the bias is negative, we subtract the error to make it even more negative
        # This is to get the correct sign in the off diagonal elements
        # We assume 100% correlation between SNe
        for i,SN in enumerate(SNe):
            if SN['zcmb'] > 0:
                redshift = SN['zcmb']
            else:
                redshift = SN['zhel']
            if JLA.survey(SN) == sample:
                # For the nearby SNe, the uncertainty in the bias correction is the bias correction itself
                if sample=='nearby':
                    SNe['e_bias'][i]=poly(redshift,plsq[0])
                    #print SN['name'],redshift, SNe['e_bias'][i]
                else:
                    vect = numpy.matrix([1,redshift,redshift**2.])
                    if poly(redshift,plsq[0]) > 0:
                        sign = 1
                    else:
                        sign = -1

                    SNe['e_bias'][i] = sign * thresh * numpy.sqrt(chisq / dof * (vect*numpy.matrix(plsq[1])*vect.T)[0,0])

                # We are getting some unrealistically large values

    date = JLA.get_date()

    if options.plot:
        ax.legend()
        plt.savefig('C_bias_%s.png' % (date))
        plt.close()

    # Compute the bias matrix

    Zero=numpy.zeros(nSNe)
    H=numpy.concatenate((SNe['e_bias'],Zero,Zero)).reshape(3,nSNe).ravel(order='F')
    C_bias = numpy.matrix(H).T * numpy.matrix(H)

    fits.writeto('C_bias_%s.fits' % (date),C_bias,clobber=True) 

    return None
# Example 27
def compute_rel_size(options):
    import numpy
    import astropy.io.fits as fits
    from astropy.table import Table
    import JLA_library as JLA
    from astropy.cosmology import FlatwCDM
    import os
    
    # -----------  Read in the configuration file ------------

    params=JLA.build_dictionary(options.config)

    # ---------- Read in the SNe list -------------------------

    SNeList=numpy.genfromtxt(options.SNlist,usecols=(0,2),dtype='S30,S200',names=['id','lc'])

    for i,SN in enumerate(SNeList):
        SNeList['id'][i]=SNeList['id'][i].replace('lc-','').replace('.list','')

    # -----------  Read in the JLA data --------------------------

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    nSNe=len(SNe)
    print 'There are %d SNe in this sample' % (nSNe)

    # sort it to match the listing in options.SNlist
    indices = JLA.reindex_SNe(SNeList['id'], SNe)        
    SNe=SNe[indices]

    # ---------- Compute the Jacobian ----------------------
    # The Jacobian is an m by 4 matrix, where m is the number of SNe
    # The columns are ordered in terms of Om, w, alpha and beta

    J=[]
    JLA_result={'Om':0.303,'w':-1.00,'alpha':0.141,'beta':3.102,'M_B':-19.05}
    offset={'Om':0.01,'w':0.01,'alpha':0.01,'beta':0.01,'M_B':0.01}
    nFit=4

    cosmo1 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=JLA_result['Om'], w0=JLA_result['w'])

    # Varying Om
    cosmo2 = FlatwCDM(name='SNLS3+WMAP7', H0=70.0, Om0=JLA_result['Om']+offset['Om'], w0=JLA_result['w'])
    J.append(5*numpy.log10((cosmo1.luminosity_distance(SNe['zcmb'])/cosmo2.luminosity_distance(SNe['zcmb']))[:,0]))

    # varying alpha
    J.append(1.0*offset['alpha']*SNe['x1'][:,0])

    # varying beta
    J.append(-1.0*offset['beta']*SNe['color'][:,0])

    # varying M_B

    J.append(offset['M_B']*numpy.ones(nSNe))
    
    J = numpy.matrix(numpy.concatenate((J)).reshape(nSNe,nFit,order='F') * 100.)

    # Set up the covariance matrices

    systematic_terms = ['bias', 'cal', 'host', 'dust', 'model', 'nonia', 'pecvel', 'stat']

    covmatrices = {'bias':params['bias'],
                   'cal':params['cal'],
                   'host':params['host'],
                   'dust':params['dust'],
                   'model':params['model'],
                   'nonia':params['nonia'],
                   'pecvel':params['pecvel'],
                   'stat':params['stat']}


    if options.type in systematic_terms:
        print "Using %s for the %s term" % (options.name,options.type) 
        covmatrices[options.type]=options.name

    # Combine the matrices to compute the full covariance matrix, and compute its inverse
    if options.all:
        # Read in the user-provided matrix; otherwise, compute it and write it out
        C=fits.getdata(JLA.get_full_path(params['all']))
    else:
        C=add_covar_matrices(covmatrices,params['diag'])
        date=JLA.get_date()
        fits.writeto('C_total_%s.fits' % (date), C, clobber=True)

    Cinv=numpy.matrix(C).I


    # Construct eta, a 3n vector

    eta=numpy.zeros(3*nSNe)
    for i,SN in enumerate(SNe):
        eta[3*i]=SN['mb']
        eta[3*i+1]=SN['x1']
        eta[3*i+2]=SN['color']

    # Construct A, a n x 3n matrix
    A=numpy.zeros(nSNe*3*nSNe).reshape(nSNe,3*nSNe)

    for i in range(nSNe):
        A[i,3*i]=1.0
        A[i,3*i+1]=JLA_result['alpha']
        A[i,3*i+2]=-JLA_result['beta']

    # ---------- Compute W  ----------------------
    # W has shape m * 3n, where m is the number of fit parameters.

    W=(J.T * Cinv * J).I * J.T* Cinv* numpy.matrix(A)

    # Note that (J.T * Cinv * J) is a m x m matrix, where m is the number of fit parameters

    # ----------- Compute V_x, where x represents the systematic uncertainty

    result=[]

    for term in systematic_terms:
        cov=numpy.matrix(fits.getdata(JLA.get_full_path(covmatrices[term])))
        if 'C_stat' in covmatrices[term]:
            # Add diagonal term from Eq. 13 to the magnitude
            sigma = numpy.genfromtxt(JLA.get_full_path(params['diag']),comments='#',usecols=(0,1,2),dtype='f8,f8,f8',names=['sigma_coh','sigma_lens','sigma_pecvel'])
            for i in range(nSNe):
                cov[3*i,3*i] += sigma['sigma_coh'][i] ** 2 + sigma['sigma_lens'][i] ** 2 + sigma['sigma_pecvel'][i] ** 2



        V=W * cov * W.T
        result.append(V[0,0])

    print '%20s\t%5s\t%5s\t%s' % ('Term','sigma','var','Percentage')
    for i,term in enumerate(systematic_terms):
        if options.type!=None and term==options.type:
            print '* %18s\t%5.4f\t%5.4f\t%4.1f' % (term,numpy.sqrt(result[i]),result[i],result[i]/numpy.sum(result)*100.)
        else:
            print '%20s\t%5.4f\t%5.4f\t%4.1f' % (term,numpy.sqrt(result[i]),result[i],result[i]/numpy.sum(result)*100.)

    print '%20s\t%5.4f' % ('Total',numpy.sqrt(numpy.sum(result)))

    return
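
# A toy illustration, with random placeholder numbers, of the propagation
# performed above: W = (J^T C^-1 J)^-1 J^T C^-1 A maps the 3n light-curve
# parameters onto the m fit parameters, and V = W C_x W^T is one systematic
# term's contribution to their covariance.
import numpy

nSNe, nFit = 5, 4
numpy.random.seed(0)
J = numpy.matrix(numpy.random.rand(nSNe, nFit))        # placeholder Jacobian (n x m)
C_total = numpy.matrix(numpy.eye(nSNe) * 0.02)         # placeholder total covariance (n x n)
A = numpy.zeros((nSNe, 3 * nSNe))                      # maps (mb, x1, c) to a distance modulus
for i in range(nSNe):
    A[i, 3 * i:3 * i + 3] = [1.0, 0.141, -3.102]       # JLA alpha and beta
W = (J.T * C_total.I * J).I * J.T * C_total.I * numpy.matrix(A)   # m x 3n
C_sys = numpy.matrix(numpy.eye(3 * nSNe) * 0.001)      # placeholder systematic term (3n x 3n)
V = W * C_sys * W.T                                    # m x m
print('%8.6f' % V[0, 0])                               # variance on the first fit parameter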
def updateCfA2(options,params,model):

    shutil.copytree(JLA.get_full_path(params['CfA2_instrument']),options.output+'/'+model['modelNumber']+'/snfit_data/Instruments/CfA2')
    
    return
def compareSALTsurfaces(surface):

    import matplotlib.pyplot as plt

    # -----------  Read in the configuration file ------------

    params=JLA.build_dictionary(options.config)
    
    # -----------  Read in the SALT models -------------------

    surface1=readSALTsurface(JLA.get_full_path(params['model1'])+'salt2-4/')
#    surface2=readSALTsurface(JLA.get_full_path(params['model2'])+'salt2-4/')
    surface2=readSALTsurface(JLA.get_full_path(params['model2']))
    

    # -----------  Plot the surfaces ----------------------
    fig1=plt.figure()
    for axes,x1 in enumerate([-2,0,2]):
        ax=fig1.add_subplot(3,1,axes+1)
        flux1=surface1.template_0[options.phase]['flux'] + x1 * surface1.template_1[options.phase]['flux']
        flux2=surface2.template_0[options.phase]['flux'] + x1 * surface2.template_1[options.phase]['flux']
        ax.plot(surface1.template_0[options.phase]['wave'],flux1)
        ax.plot(surface2.template_0[options.phase]['wave'],flux2)
        ax.text(7000,0.3,"C=0 x1=%2d" % x1)

    ax.set_xlabel("wavelength ($\AA$)")
        
    plt.savefig(options.config.replace(".config","_SED.png"))

       
    # -----------  Plot the colour laws ----------------------

    # See salt2extinction.cc
    # Note the extrapolation (a standalone sketch follows after this function)

#    /*
#    ========================================================
#    VERSION 1
#    ========================================================
#    if(l_B<=l<=l_R)
#    ext = exp( color * constant * ( alpha*l + params(0)*l^2 + params(1)*l^3 + ... ))
#    = exp( color * constant * P(l) )
#    alpha = 1-params(0)-params(1)-...
#    if(l>l_R)
#    ext = exp( color * constant * ( P(l_R) + P'(l_R)*(l-l_R) ) )
#    if(l<l_B)
#    ext = exp( color * constant * ( P(l_B) + P'(l_B)*(l-l_B) ) )
#  
#    ======================================================== 
#    */

    constant=0.4 * numpy.log(10)
    fig3=plt.figure()
    ax3=fig3.add_subplot(111)
    wave=surface1.template_0[options.phase]['wave']
    wave_min=2800.
    wave_max=7000.
    
    wave_min_reduced=reduced_lambda(wave_min)
    wave_max_reduced=reduced_lambda(wave_max)
    
    # See salt2extinction.h
    reduced_wave=reduced_lambda(wave)

    # Model 1
    alpha1=1.0
    
    # There are 4 co-efficients in the colour law
    for coeff in surface1.colour_law['coeff']:
        alpha1-=coeff

    p1=numpy.zeros(len(reduced_wave))
        
    # Compute derivatives for extrapolations
    p1_derivative_min=derivative(alpha1,surface1,wave_min_reduced)
    p1_derivative_max=derivative(alpha1,surface1,wave_max_reduced)

    # Compute colour law at the points of extrapolations
    p1_wave_min_reduced=colourLaw(alpha1,surface1,wave_min_reduced)
    p1_wave_max_reduced=colourLaw(alpha1,surface1,wave_max_reduced)

    for index,rl in enumerate(reduced_wave):
        if rl < wave_min_reduced:
            p1[index]=p1_wave_min_reduced+p1_derivative_min*(rl-wave_min_reduced)
        elif rl > wave_max_reduced:
            p1[index]=p1_wave_max_reduced+p1_derivative_max*(rl-wave_max_reduced)
        else:
            p1[index]=colourLaw(alpha1,surface1,rl)
    
    # Model 2
    alpha2=1.0

    for coeff in surface2.colour_law['coeff']:
        alpha2-=coeff

    p2=numpy.zeros(len(reduced_wave))
            
    # Compute derivatives for extrapolations
    p2_derivative_min=derivative(alpha2,surface2,wave_min_reduced)
    p2_derivative_max=derivative(alpha2,surface2,wave_max_reduced)

    # Compute colour law at the points of extrapolations
    p2_wave_min_reduced=colourLaw(alpha2,surface2,wave_min_reduced)
    p2_wave_max_reduced=colourLaw(alpha2,surface2,wave_max_reduced)

    for index,rl in enumerate(reduced_wave):
        if rl < wave_min_reduced:
            p2[index]=p2_wave_min_reduced+p2_derivative_min*(rl-wave_min_reduced)
        elif rl > wave_max_reduced:
            p2[index]=p2_wave_max_reduced+p2_derivative_max*(rl-wave_max_reduced)
        else:
            p2[index]=colourLaw(alpha2,surface2,rl)
    
    # See Fig.3 of B14.
    # p1 and p2 are the log (colour law)
    
    C=-0.1

    A1_wave=p1*C
    ax3.plot(wave, A1_wave,label='model1')
    
    A2_wave=p2*C
    ax3.plot(wave, A2_wave, label='model2')

    # Plot CCM R_V=3.1
    E_BV=0.1
    R_V=3.1
    a_wave=E_BV * R_V * CCM(wave, R_V)
    a_B=E_BV * R_V * CCM(numpy.array([wave_B]),R_V)
    ax3.plot(wave,a_wave-a_B,label='CCM R_V=3.1')

    #CCM R_V=1.0
    R_V=1.0
    a_wave=E_BV * R_V * CCM(wave, R_V)
    a_B=E_BV * R_V * CCM(numpy.array([wave_B]),R_V)
    ax3.plot(wave,a_wave-a_B,label='CCM R_V=1.0')

    # F99 R_V=3.1
    a_wave=E_BV * R_V * Fitz99(wave)
    a_B=E_BV * R_V * Fitz99(numpy.array([wave_B]))
    ax3.plot(wave,a_wave-a_B,label='F99 R_V=3.1')
    
    ax3.legend()
    ax3.set_xlabel("wavelength ($\AA$)")
    ax3.set_ylim(-0.3,0.8)
    plt.savefig(options.config.replace(".config","_colourlaw.png"))

    # -----------  Plot examples of the impact of colour ----------------------
    # Assume x1=0
    # Note
    # The colour laws p1 and p2 have the absorption in the B band subtracted
    # The units are magnitudes
    # Are we correctly applying the colour law?

    fig2=plt.figure()
    for axes,C in enumerate([-0.1,0,0.1]):
        ax2=fig2.add_subplot(3,1,axes+1)
        flux1=surface1.template_0[options.phase]['flux'] * numpy.exp(C*constant*p1)
        flux2=surface2.template_0[options.phase]['flux'] * numpy.exp(C*constant*p2)
        ax2.plot(surface1.template_0[options.phase]['wave'],flux1)
        ax2.plot(surface2.template_0[options.phase]['wave'],flux2)
        ax2.text(7000,0.3,"C=%4.1f x1=0.0" % C)

    ax2.set_xlabel("wavelength ($\AA$)")
    plt.savefig(options.config.replace(".config","_colour_SED.png"))

    plt.show()
    plt.close()
    

    return
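
# A hedged sketch of the extrapolation rule quoted above from
# salt2extinction.cc, written in the reduced-wavelength variable: inside
# [rl_min, rl_max] the law is the polynomial P; outside, it continues
# linearly with slope P' at the nearest boundary. The coefficients and
# boundaries below are placeholders, not the SALT2-4 values.
import numpy

def extrapolated_colour_law(rl, coeffs, rl_min, rl_max):
    # P(x) = alpha*x + coeffs[0]*x^2 + coeffs[1]*x^3 + ..., alpha = 1 - sum(coeffs)
    alpha = 1.0 - numpy.sum(coeffs)
    P = lambda x: alpha * x + sum(c * x**(i + 2) for i, c in enumerate(coeffs))
    dP = lambda x: alpha + sum((i + 2) * c * x**(i + 1) for i, c in enumerate(coeffs))
    rl = numpy.atleast_1d(numpy.asarray(rl, dtype=float))
    out = P(rl)
    low, high = rl < rl_min, rl > rl_max
    out[low] = P(rl_min) + dP(rl_min) * (rl[low] - rl_min)
    out[high] = P(rl_max) + dP(rl_max) * (rl[high] - rl_max)
    return out

print(extrapolated_colour_law([-0.6, 0.0, 0.8], [-0.50, 0.45, -0.08, 0.01], -0.34, 0.65))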
Example #30
def compute_rel_size(options):
    import numpy
    import astropy.io.fits as fits
    from astropy.table import Table
    import JLA_library as JLA
    from astropy.cosmology import FlatwCDM
    import os

    # -----------  Read in the configuration file ------------

    params = JLA.build_dictionary(options.config)

    # ---------- Read in the SNe list -------------------------

    SNeList = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc'])

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-',
                                                    '').replace('.list', '')

    # -----------  Read in the JLA data --------------------------

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    nSNe = len(SNe)
    print 'There are %d SNe in this sample' % (nSNe)

    # sort it to match the listing in options.SNlist
    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe = SNe[indices]

    # ---------- Compute the Jacobian ----------------------
    # The Jacobian is an m by 4 matrix, where m is the number of SNe
    # The columns are ordered in terms of Om, w, alpha and beta

    J = []
    JLA_result = {
        'Om': 0.303,
        'w': -1.00,
        'alpha': 0.141,
        'beta': 3.102,
        'M_B': -19.05
    }
    offset = {'Om': 0.01, 'w': 0.01, 'alpha': 0.01, 'beta': 0.01, 'M_B': 0.01}
    nFit = 4

    cosmo1 = FlatwCDM(name='SNLS3+WMAP7',
                      H0=70.0,
                      Om0=JLA_result['Om'],
                      w0=JLA_result['w'])

    # Varying Om
    cosmo2 = FlatwCDM(name='SNLS3+WMAP7',
                      H0=70.0,
                      Om0=JLA_result['Om'] + offset['Om'],
                      w0=JLA_result['w'])
    J.append(5 * numpy.log10((cosmo1.luminosity_distance(SNe['zcmb']) /
                              cosmo2.luminosity_distance(SNe['zcmb']))[:, 0]))

    # varying alpha
    J.append(1.0 * offset['alpha'] * SNe['x1'][:, 0])

    # varying beta
    J.append(-1.0 * offset['beta'] * SNe['color'][:, 0])

    # varying M_B

    J.append(offset['M_B'] * numpy.ones(nSNe))

    J = numpy.matrix(
        numpy.concatenate((J)).reshape(nSNe, nFit, order='F') * 100.)

    # Set up the covariance matrices

    systematic_terms = [
        'bias', 'cal', 'host', 'dust', 'model', 'nonia', 'pecvel', 'stat'
    ]

    covmatrices = {
        'bias': params['bias'],
        'cal': params['cal'],
        'host': params['host'],
        'dust': params['dust'],
        'model': params['model'],
        'nonia': params['nonia'],
        'pecvel': params['pecvel'],
        'stat': params['stat']
    }

    if options.type in systematic_terms:
        print "Using %s for the %s term" % (options.name, options.type)
        covmatrices[options.type] = options.name

    # Combine the matrices to compute the full covariance matrix, and compute its inverse
    if options.all:
        # Read in the user-provided matrix; otherwise, compute it and write it out
        C = fits.getdata(JLA.get_full_path(params['all']))
    else:
        C = add_covar_matrices(covmatrices, params['diag'])
        date = JLA.get_date()
        fits.writeto('C_total_%s.fits' % (date), C, clobber=True)

    Cinv = numpy.matrix(C).I

    # Construct eta, a 3n vector

    eta = numpy.zeros(3 * nSNe)
    for i, SN in enumerate(SNe):
        eta[3 * i] = SN['mb']
        eta[3 * i + 1] = SN['x1']
        eta[3 * i + 2] = SN['color']

    # Construct A, a n x 3n matrix
    A = numpy.zeros(nSNe * 3 * nSNe).reshape(nSNe, 3 * nSNe)

    for i in range(nSNe):
        A[i, 3 * i] = 1.0
        A[i, 3 * i + 1] = JLA_result['alpha']
        A[i, 3 * i + 2] = -JLA_result['beta']

    # ---------- Compute W  ----------------------
    # W has shape m * 3n, where m is the number of fit parameters.

    W = (J.T * Cinv * J).I * J.T * Cinv * numpy.matrix(A)

    # Note that (J.T * Cinv * J) is a m x m matrix, where m is the number of fit parameters

    # ----------- Compute V_x, where x represents the systematic uncertainty

    result = []

    for term in systematic_terms:
        cov = numpy.matrix(fits.getdata(JLA.get_full_path(covmatrices[term])))
        if 'C_stat' in covmatrices[term]:
            # Add diagonal term from Eq. 13 to the magnitude
            sigma = numpy.genfromtxt(
                JLA.get_full_path(params['diag']),
                comments='#',
                usecols=(0, 1, 2),
                dtype='f8,f8,f8',
                names=['sigma_coh', 'sigma_lens', 'sigma_pecvel'])
            for i in range(nSNe):
                cov[3 * i, 3 * i] += sigma['sigma_coh'][i]**2 + sigma[
                    'sigma_lens'][i]**2 + sigma['sigma_pecvel'][i]**2

        V = W * cov * W.T
        result.append(V[0, 0])

    print '%20s\t%5s\t%5s\t%s' % ('Term', 'sigma', 'var', 'Percentage')
    for i, term in enumerate(systematic_terms):
        if options.type != None and term == options.type:
            print '* %18s\t%5.4f\t%5.4f\t%4.1f' % (term, numpy.sqrt(
                result[i]), result[i], result[i] / numpy.sum(result) * 100.)
        else:
            print '%20s\t%5.4f\t%5.4f\t%4.1f' % (term, numpy.sqrt(
                result[i]), result[i], result[i] / numpy.sum(result) * 100.)

    print '%20s\t%5.4f' % ('Total', numpy.sqrt(numpy.sum(result)))

    return
Example #31
    )

    parser.add_option("-s", "--SNlist", dest="SNlist", help="List of SNe")

    parser.add_option(
        "-l",
        "--lcfits",
        dest="lcfits",
        default="lightCurveFits",
        help="Key in config file pointing to lightcurve fit parameters")

    (options, args) = parser.parse_args()

    params = JLA.build_dictionary(options.config)

    lcfile = JLA.get_full_path(params[options.lcfits])
    SN_data = Table.read(lcfile, format='fits')

    SN_list_long = np.genfromtxt(options.SNlist, usecols=(0), dtype='S30')
    SN_list = [
        name.replace('lc-', '').replace('.list', '') for name in SN_list_long
    ]

    SN_indices = JLA.reindex_SNe(SN_list, SN_data)
    SN_data = SN_data[SN_indices]

    velfile = JLA.get_full_path(params['velocityField'])
    vel_correction = VelocityCorrection(velfile)
    #z_correction = vel_correction.apply(SN_data)

    C_pecvel = vel_correction.covmat_pecvel(SN_data)
Example #32
    parser.add_option("-c", "--config", dest="config", default=None,
                      help="Parameter file containing the location of various files")

    (options, args) = parser.parse_args()

    # Read in the parameter file
    params = JLA.build_dictionary(options.config)

    # Use JLA values
    alpha = 0.14
    beta = 3.1
    M_1_B = -19.02 # We use a brighter value; JLA uses -19.05
    Delta_M = -0.08
    # O_m?

    C_eta = fits.getdata(JLA.get_full_path(params['eta']))
    sigma_diag = np.genfromtxt(JLA.get_full_path(params['diag']), comments='#', \
        usecols=(0, 1, 2), dtype='f8,f8,f8', names=['coh', 'lens', 'pecvel'])

    # Read in the lightcurve fit parameters
    lcfits = JLA.get_full_path(params['lightCurveFits'])
    data_all = Table.read(lcfits)

    # Read in the list of SNe that are used in the BBC analysis
    include=np.genfromtxt(JLA.get_full_path(params['include']), comments='#', \
        usecols=(0), dtype=[('name','a20')])


    include2=[]
    for name in include['name']:
        if name[0]=='1':
Example #33
    parser = OptionParser()

    parser.add_option("-c", "--config", dest="config", default="JLA.config",
                      help="Parameter file containing the location of various JLA parameters")

    parser.add_option("-s", "--SNlist", dest="SNlist",
                      help="List of SNe")

    parser.add_option("-l", "--lcfits", dest="lcfits", default="lightCurveFits",
                      help="Key in config file pointing to lightcurve fit parameters")
    
    parser.add_option("-o", "--output", dest="output",default="sigma_mu.txt", 
                  help="Output")

    (options, args) = parser.parse_args()

    params = JLA.build_dictionary(options.config)
    
    lcfile = JLA.get_full_path(params[options.lcfits])
    SN_data = Table.read(lcfile, format='fits')

    SN_list_long = np.genfromtxt(options.SNlist, usecols=(0), dtype='S30')
    SN_list = [name.replace('lc-', '').replace('.list', '').replace('_smp','') for name in SN_list_long]
    SN_indices = JLA.reindex_SNe(SN_list, SN_data)
    SN_data = SN_data[SN_indices]

    sigma_diag = compute_diag(SN_data)

    np.savetxt(options.output,sigma_diag, header='coh lens pecvel')
Example #34
def adjustExtinction(options):
    import numpy
    from astropy.table import Table
    import JLA_library as JLA
    import subprocess as sp
    import os

    params=JLA.build_dictionary(options.config)

    # ----------- Correction factor for extinction -----------
    # See ApJ 737 103
    extinctionFactor=0.86

    SNlist=Table.read(params['SNlist'], format='ascii',names=['name','type','lc'],data_start=0)

    try:
        os.mkdir(JLA.get_full_path(params['outputDir']))
    except:
        pass

    for SN in SNlist:
        # Clean the directory
        if options.clean:
            cmd="*fits co* *opt0* *step* re* salt2* spec_coverage* sne_pcafit.list *init* pca_1_opt1_before_final_normalization.list pca_1_superinit.list"
            sp.call(cmd,shell=True)

        # Copy across the file
        outputDir=JLA.get_full_path(params['outputDir'])
        inputFile=outputDir+os.path.split(SN['lc'])[1]
        cmd='cp %s %s' % (SN['lc'],inputFile)
        sp.call(cmd, shell=True)

        lc=open(inputFile)
        lines=lc.readlines()
        lc.close()

        if SN['type']=='LC':
            # Copy across the covmat file, if it exists
            covmatExist=False
            for line in lines:
                try:
                    if "@COVMAT"==line.split()[0]:
                        covmat=line.split()[1]
                        covmatExist=True
                        break
                except:
                    pass

            if covmatExist:
                cmd='cp %s %s' % (os.path.split(SN['lc'])[0]+'/'+covmat,outputDir)
                sp.call(cmd, shell=True)
            
            # Adjust the extinction
            mwebv=0.0
            for line in lines:
                if "@MWEBV"==line.split()[0]:
                    mwebv=float(line.split()[1])
                    break

            if mwebv>0:
                # Remove the old extinction and insert the new one
                lc=open(inputFile,'w')
                for line in lines:
                    if 'MWEBV' in line:
                        lc.write('@MWEBV %5.4f\n' % (mwebv * extinctionFactor))
                    else:
                        lc.write(line)
                lc.close()
            else:
                print "WARNING: Zero or no extinction for %s" % (inputFile) 

            # Adjust the ZP reference, if required
            if not options.adjustZPref:
                continue

            adjust=None
            for line in lines:
                try:
                    entries=line.split()
                    if entries[0]=="@SURVEY":
                        if entries[1] in ['Riess1999_LC','Jha2006_LC']:
                            adjust=entries[1]
                            break
                except:
                    pass

            if adjust=='Riess1999_LC':
                cmd="sed 's/VEGA2/%s/' %s > temp1" % ('VEGA-R99',inputFile)
                
            elif adjust=='Jha2006_LC':
                cmd="sed 's/VEGA2/%s/' %s > temp1" % ('VEGA-J06',inputFile)

            if adjust!=None:
                sp.call(cmd,shell=True)
                cmd='cp temp1 %s' % (inputFile)
                sp.call(cmd,shell=True)

# We need to overwrite the file                

    return
Example #35
    parser = OptionParser()

    parser.add_option("-c", "--config", dest="config", default="JLA.config",
                      help="Parameter file containing the location of various JLA parameters")

    parser.add_option("-s", "--SNlist", dest="SNlist",
                      help="List of SNe")

    parser.add_option("-l", "--lcfits", dest="lcfits", default="lightCurveFits",
                      help="Key in config file pointing to lightcurve fit parameters")

    (options, args) = parser.parse_args()

    params = JLA.build_dictionary(options.config)

    lcfile = JLA.get_full_path(params[options.lcfits])
    SN_data = Table.read(lcfile, format='fits')

    SN_list_long = np.genfromtxt(options.SNlist, usecols=(0), dtype='S30')
    SN_list = [name.replace('lc-', '').replace('.list', '') for name in SN_list_long]
  
    SN_indices = JLA.reindex_SNe(SN_list, SN_data)
    SN_data = SN_data[SN_indices]

    velfile = JLA.get_full_path(params['velocityField'])
    vel_correction = VelocityCorrection(velfile)
    #z_correction = vel_correction.apply(SN_data)

    C_pecvel = vel_correction.covmat_pecvel(SN_data)

    date = JLA.get_date()
Example #36
def compute_C_K(options):
    import JLA_library as JLA
    import jla_FGCM as FGCM
    import numpy
    import astropy.io.fits as fits

    # -----------  Read in the configuration file ------------

    params = JLA.build_dictionary(options.config)

    # -----------  We read in the JLA version of C_Kappa ------------

    nDim = 52  # The number of elements in the DES C_Kappa matrix
    C_K_DES = numpy.zeros(nDim * nDim).reshape(nDim, nDim)

    SMP_ZP = 0.001                                 # The accuracy of the SMP ZPs
    # 6.6 mmag RMS scatter between FGCM and GAIA
    # The first factor of sqrt(2) comes from assuming that GAIA and DES contribute equally to the error
    # The second factor of sqrt(2) arises because we are taking the difference between two points, one where the standard is, and another where the SN is.

    uniformity = 0.0066 / numpy.sqrt(3.) # Following BBC
    nC26202_Observations = {'DES_g':133,'DES_r':21,'DES_i':27,'DES_z':78}                      # Number of times C26202 has been observed
    FGCM_unc = 0.005                               # The RMS scatter in FGCM standard magnitudes
    chromatic_differential = 0.0                   # Set to zero for now

    if options.base:
        # Read in the JLA matrix and extract the appropriate rows and columns
        # The matrix is structured in blocks with ZPs first,
        # and uncertainties in the filter curves second
        # The order is specified in salt2_calib_variations_all/saltModels.list
        # CfA3 and CfA4 are in rows 10 to 19 and 46 to 56 (starting at row 1) 
        # We write these to rows 1 to 10 and 27 to 36
        # CSP are in rows 20 to 25 and 57 to 62.
        # We write these to rows 11 to 16 and 37 to 42
        
        C_K_JLA = fits.getdata(JLA.get_full_path(params['C_kappa_JLA']))

        # Extract the relevant columns and rows
        # ZPs first
    # Since the indices for CfA3, CfA4, and CSP are consecutive, we do this all at once
        size = C_K_JLA.shape[0]
        C_K_DES[0:16, 0:16] = C_K_JLA[9:25,9:25]

        # Filter curves second
        C_K_DES[27:42, 27:42] = C_K_JLA[9+size/2:25+size/2,9+size/2:25+size/2]

        # Cross terms. Not needed, as they are zero
        # C_K_DES[0:16, 23:39] = C_K_JLA[9:25,9+size/2:25+size/2]
        # C_K_DES[23:39, 0:16] = C_K_JLA[9+size/2:25+size/2,9:25]

    # Read in the table listing the uncertainties in the ZPs and
    # effective wavelengths

    filterUncertainties = numpy.genfromtxt(JLA.get_full_path(params['filterUncertainties']),
                comments='#',usecols=(0,1,2,3,4), dtype='S30,f8,f8,f8,f8',
                names=['filter', 'zp', 'zp_off', 'wavelength', 'central'])

    # For the Bc filter of CfA, and the V1 and V2 filters of CSP,
    # we assume that they have the same sized systematic uncertainties as
    # the B filter of CfA and the V filter of CSP.
    # We could either copy these terms across or recompute them.
    # We choose to recompute them


    # Compute the terms in DES, this includes the cross terms
    # We first compute them separately, then add them to the matrix

    nFilters = len(filterUncertainties)
    C_K_new = numpy.zeros(nFilters*nFilters*4).reshape(nFilters*2, nFilters*2)

    # 1) DES controlled uncertainties  
    #   This uncertainty in the ZP has several components
    #   a) The uncertainty in the differential chromatic correction (set to zero for now)
    #   Note that this error is 100% correlated to the component of b) that comes from the filter curve
    #   b) The uncertainty in the measurement of the transfer to the AB system
    #      using the observations of C26202
    #   c) The SN field-to-field variation between DES and GAIA

    for i, filt in enumerate(filterUncertainties):
        if 'DES' in filt['filter']:
            error_I0,error_chromatic,error_AB=FGCM.prop_unc(params,filt)
            #print numpy.sqrt((error_AB)**2. + (FGCM_unc)**2. / nC26202_Observations[filt['filter']])
            C_K_new[i, i] = uniformity**2. + (error_AB)**2.+(FGCM_unc)**2. / nC26202_Observations[filt['filter']] + SMP_ZP**2.
            print '%s %5.4f' % (filt['filter'],numpy.sqrt(C_K_new[i, i]))
            C_K_new[i, i+nFilters] = (error_AB) * filt['wavelength']
            C_K_new[i+nFilters, i] = (error_AB) * filt['wavelength']
            C_K_new[i+nFilters, i+nFilters] = (filt['wavelength'])**2.
        else:
            C_K_new[i, i] = (filt['zp'] / 1000.)**2. + (filt['zp_off'] / 3. / 1000.)**2.


    # 2a) B14 3.4.1 The uncertainty associated to the measurement of
    # the Secondary CALSPEC standards
    # The uncertainty is assumed to be uncorrelated between filters
    # It only affects the diagonal terms of the ZPs
    # It is estimated from repeat STIS measurements of the standard
    # AGK+81D266  Bohlin et al. 2000 AJ 120, 437 and Bohlin 1999 ISR 99-07

    nObs_C26202 = 1          # It's been observed once
    unc_transfer = 0.003     # 0.3% uncertainty

    for i, filt1 in enumerate(filterUncertainties):
        C_K_new[i, i] += unc_transfer**2. / nObs_C26202


    # 2b) B14 3.4.1 The uncertainty in the colour of the WD system 0.5%
    # from 3,000-10,000
    # The uncertainty is computed with respect to the Bessell B filter.
    # The Bessell B filter is the filter we use in computing the dist. modulus
    # The absolute uncertainty at the rest frame wavelength of the B band
    # is not important here, as this is absorbed into the
    # combination of the absolute B band magnitude of SNe Ia and
    # the Hubble constant.

    slope = 0.005
    waveStart = 300
    waveEnd = 1000.
    # central = 436.0 # Corresponds to B filter
    central = 555.6   # Used in the Pantheon sample

    # Note that 2.5 * log_10 (1+x) ~ x for |x| << 1
    for i, filt1 in enumerate(filterUncertainties):
        for j, filt2 in enumerate(filterUncertainties):
            if i >= j:
                C_K_new[i, j] += (slope / (waveEnd - waveStart) * (filt1['central']-central)) * \
                                 (slope / (waveEnd - waveStart) * (filt2['central']-central))
                
    C_K_new = C_K_new+C_K_new.T-numpy.diag(C_K_new.diagonal())

    if options.base:
        # We do not update the elements that were copied across from the JLA matrix
        sel = numpy.zeros(nDim, bool)
        sel[0:16] = True
        sel[23:39] = True
        sel2d = numpy.matrix(sel).T * numpy.matrix(sel)
        C_K_new[sel2d] = 0.0

    C_K_DES += C_K_new

    # Write out the results
    date = JLA.get_date()
    hdu = fits.PrimaryHDU(C_K_DES)
    hdu.writeto("%s_%s.fits" % (options.output, date), clobber=True)

    return
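
# A toy sketch, with placeholder numbers, of the block structure assembled
# above: ZP terms occupy the first nFilters rows/columns, effective-wavelength
# terms the second block, and the AB-transfer error times the wavelength
# shift provides the cross term.
import numpy

nFilters = 2
error_AB = numpy.array([0.004, 0.005])   # placeholder AB-transfer errors (mag)
dwave = numpy.array([6.0, 7.0])          # placeholder filter-curve shifts (Angstrom)
C = numpy.zeros((2 * nFilters, 2 * nFilters))
for i in range(nFilters):
    C[i, i] = error_AB[i]**2.                      # ZP block
    C[i, i + nFilters] = error_AB[i] * dwave[i]    # ZP / wavelength cross term
    C[i + nFilters, i] = error_AB[i] * dwave[i]
    C[i + nFilters, i + nFilters] = dwave[i]**2.   # filter-curve block
print(C)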
Example #37
def add_covar_matrices(options):
    """
    Python program that adds the individual covariance matrices into a single matrix
    """

    import time
    import numpy
    import astropy.io.fits as fits
    import JLA_library as JLA

    params = JLA.build_dictionary(options.config)

    # Read in the terms that account for uncertainties in peculiar velocities,
    # intrinsic dispersion, and lensing

    # Read in the covariance matrices
    matrices = []

    systematic_terms = [
        'bias', 'cal', 'host', 'dust', 'model', 'nonia', 'pecvel', 'stat'
    ]

    covmatrices = {
        'bias': params['bias'],
        'cal': params['cal'],
        'host': params['host'],
        'dust': params['dust'],
        'model': params['model'],
        'nonia': params['nonia'],
        'pecvel': params['pecvel'],
        'stat': params['stat']
    }

    for term in systematic_terms:
        matrices.append(fits.getdata(JLA.get_full_path(covmatrices[term]), 0))

    # Add the matrices
    size = matrices[0].shape
    add = numpy.zeros(size[0]**2.).reshape(size[0], size[0])
    for matrix in matrices:
        add += matrix

    # Write out this matrix. This is C_eta in eq. 13 of B14

    date = JLA.get_date()

    fits.writeto('C_eta_%s.fits' % (date), add, clobber=True)

    # Compute A

    nSNe = size[0] / 3

    jla_results = {'Om': 0.303, 'w': -1.027, 'alpha': 0.141, 'beta': 3.102}

    arr = numpy.zeros(nSNe * 3 * nSNe).reshape(nSNe, 3 * nSNe)

    for i in range(nSNe):
        arr[i, 3 * i] = 1.0
        arr[i, 3 * i + 1] = jla_results['alpha']
        arr[i, 3 * i + 2] = -jla_results['beta']

    cov = numpy.matrix(arr) * numpy.matrix(add) * numpy.matrix(arr).T

    # Add the diagonal terms

    sigma = numpy.genfromtxt(JLA.get_full_path(params['diag']),
                             comments='#',
                             usecols=(0, 1, 2),
                             dtype='f8,f8,f8',
                             names=['sigma_coh', 'sigma_lens', 'sigma_pecvel'])

    for i in range(nSNe):
        cov[i, i] += sigma['sigma_coh'][i]**2 + \
        sigma['sigma_lens'][i]**2 + \
        sigma['sigma_pecvel'][i]**2

    fits.writeto('C_total_%s.fits' % (date), cov, clobber=True)

    return
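
# A toy version, with placeholder numbers, of the two steps above:
# A C_eta A^T folds the 3n x 3n matrix down to n x n using the rows
# (1, alpha, -beta), and the coherent-scatter, lensing and peculiar-velocity
# terms are then added to the diagonal, as in eq. 13 of B14.
import numpy

nSNe = 3
alpha, beta = 0.141, 3.102
C_eta = numpy.eye(3 * nSNe) * 0.01                       # placeholder 3n x 3n matrix
A = numpy.zeros((nSNe, 3 * nSNe))
for i in range(nSNe):
    A[i, 3 * i:3 * i + 3] = [1.0, alpha, -beta]
C = numpy.matrix(A) * numpy.matrix(C_eta) * numpy.matrix(A).T
sigma_coh, sigma_lens, sigma_pecvel = 0.10, 0.05, 0.03   # placeholder diagonal terms
for i in range(nSNe):
    C[i, i] += sigma_coh**2 + sigma_lens**2 + sigma_pecvel**2
print(C)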
Example #38
def compute_nonIa(options):
    """Pythom program to compute the systematic unsertainty related to
    the contamimation from Ibc SNe"""

    import numpy
    import astropy.io.fits as fits
    from astropy.table import Table, MaskedColumn, vstack
    import JLA_library as JLA

    # The program computes the covariance for the spectroscopically confirmed SNe Ia only
    # The program assumes that the JLA SNe are first in any list
    # Taken from C11

    # Inputs are the rates of SNe Ia and Ibc, the most likely contaminant

    # Ia rate - Perrett et al.
    # SN Ibc rate - proportional to the star formation rate - Hopkins and Beacom
    # SN Ib luminosity distribution. Li et al + bright SN Ibc Richardson

    # The bright Ibc population
    # d_bc = 0.25     # The offset in magnitude between the Ia and bright Ibc
    # s_bc = 0.25     # The magnitude spread
    # f_bright = 0.25 # The fraction of Ibc SN that are bright

    # Simulate the characteristics of the SNLS survey
    # Apply outlier rejection
    # All SNe that pass the cuts are included in the sample

    # One then has a mixture of SNe Ia and SNe Ibc
    # and the average magnitude at each redshift is biased. This
    # is called the raw bias. One multiplies the raw bias by the fraction of
    # objects classified as SNe Ia*

    # The results are presented in 7 redshift bins defined in table 14 of C11
    # We use these results to generate the matrix.
    # Only the SNLS SNe in the JLA sample are considered.
    # For the photometrically selected sample and other surveys, this will probably be different
    # JLA computes this for the SNLS sample only

    # We assume that the redshift in this table refers to the left hand edge of each bin

    z_bin = numpy.array([0.0, 0.1, 0.26, 0.41, 0.57, 0.72, 0.89, 1.04])
    raw_bias = numpy.array(
        [0.0, 0.015, 0.024, 0.024, 0.024, 0.023, 0.026, 0.025])
    f_star = numpy.array([0.0, 0.00, 0.06, 0.14, 0.17, 0.24, 0.50, 0.00])

    # The covariance between SNe Ia in the same redshift bin is fully correlated
    # Otherwise, it is uncorrelated

    # -----------  Read in the configuration file ------------

    params = JLA.build_dictionary(options.config)

    SNeList = numpy.genfromtxt(options.SNlist,
                               usecols=(0, 2),
                               dtype='S30,S200',
                               names=['id', 'lc'])

    nSNe = len(SNeList)
    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-',
                                                    '').replace('.list', '')

    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    # Add a bin column and a column that specifies whether the covariance is non-zero
    SNe['bin'] = 0
    SNe['eval'] = False

    # make order of data (in SNe) match SNeList

    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe = SNe[indices]

    # Identify the SNLS SNe in the JLA sample

    for i, SN in enumerate(SNe):
        if SN['source'][0] == 'JLA' and SN['name'][0][2:4] in [
                'D1', 'D2', 'D3', 'D4'
        ]:
            SNe['eval'][i] = True

    # Work out which redshift bin each SNe belongs to
    # In numpy.digitize, the bin number starts at 1, so we subtract 1
    SNe['bin'] = numpy.digitize(SNe['zhel'], z_bin) - 1

    # Build the covariance matrix

    C_nonIa = numpy.zeros(nSNe * 3 * nSNe * 3).reshape(nSNe * 3, nSNe * 3)

    # It only computes the covariance for the spectroscopically confirmed SNLS SNe
    # We assume that covariance between redshift bins is uncorrelated

    for i in range(nSNe):
        bin1 = SNe['bin'][i]
        for j in range(nSNe):
            bin2 = SNe['bin'][j]
            if SNe['eval'][j] and SNe['eval'][i] and bin1 == bin2:
                C_nonIa[3 * i, 3 *
                        j] = (raw_bias[bin1] *
                              f_star[bin1]) * (raw_bias[bin2] * f_star[bin2])

    date = JLA.get_date()

    fits.writeto('C_nonIa_%s.fits' % date, numpy.array(C_nonIa), clobber=True)

    return
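
# A toy version of the construction above: SNe falling in the same redshift
# bin receive covariance (raw_bias*f_star)_i * (raw_bias*f_star)_j in their
# magnitude rows, and different bins are uncorrelated. The redshifts are
# placeholders; the bin values are those quoted above from table 14 of C11.
import numpy

z_bin = numpy.array([0.0, 0.1, 0.26, 0.41, 0.57, 0.72, 0.89, 1.04])
raw_bias = numpy.array([0.0, 0.015, 0.024, 0.024, 0.024, 0.023, 0.026, 0.025])
f_star = numpy.array([0.0, 0.00, 0.06, 0.14, 0.17, 0.24, 0.50, 0.00])
z = numpy.array([0.30, 0.32, 0.60])          # placeholder redshifts, all assumed SNLS
bins = numpy.digitize(z, z_bin) - 1
b = raw_bias[bins] * f_star[bins]
C = numpy.zeros((3 * len(z), 3 * len(z)))
for i in range(len(z)):
    for j in range(len(z)):
        if bins[i] == bins[j]:
            C[3 * i, 3 * j] = b[i] * b[j]    # magnitude rows only
print(C[0::3, 0::3])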
Example #39
def compute_C_K(options):
    import JLA_library as JLA
    import numpy
    import astropy.io.fits as fits

    # -----------  Read in the configuration file ------------

    params = JLA.build_dictionary(options.config)

    # -----------  We read in the JLA version of C_Kappa ------------

    if options.base:
        # CfA1 and CfA2 not treated separately and we use the JLA uncertainties
        nDim = 42
    else:
        # CfA1 and CfA2 treated separately, and we use the Pantheon uncertainties
        nDim = 58
        
    C_K_H0 = numpy.zeros(nDim * nDim).reshape(nDim, nDim)

    if options.base:
        # Read in the JLA matrix and extract the appropriate rows and columns
        # The matrix is structured in blocks with ZPs first,
        # and uncertainties in the filter curves second
        # The order is specified in salt2_calib_variations_all/saltModels.list
        # Standard, Landolt photometry is in rows 5 to 9 and rows 42 to 46
        # Keplercam is in rows 10 to 14 and 47 to 51
        # 4 Shooter is in rows 15 to 19 and 52 to 56
        # CSP is in rows 20 to 25 and 56 to 62
        
        C_K_JLA = fits.getdata(JLA.get_full_path(params['C_kappa_JLA']))

        # Extract the relevant columns and rows
        # ZPs first
        # Since the indices are consecutive, we do this all at once
        size = C_K_JLA.shape[0]
        C_K_H0[0:21, 0:21] = C_K_JLA[4:25,4:25]

        # Filter curves second
        C_K_H0[21:42, 21:42] = C_K_JLA[4+size/2:25+size/2,4+size/2:25+size/2]
    else:
        filterUncertainties = numpy.genfromtxt(JLA.get_full_path(params['filterUncertainties']),
                comments='#',usecols=(0,1,2,3,4), dtype='S30,f8,f8,f8,f8',
                names=['filter', 'zp', 'zp_off', 'wavelength', 'central'])

        # 1) ZP and filter uncertainty
        # We add a third of the offset found in Scolnic et al.
        for i, filt in enumerate(filterUncertainties):
            C_K_H0[i, i] = (filt['zp'] / 1000.)**2. + (filt['zp_off'] / 3. / 1000.)**2.
            C_K_H0[i+29, i+29] = (filt['wavelength'])**2.


        # 2a) B14 3.4.1 The uncertainty associated to the measurement of
        # the Secondary CALSPEC standards
        # The uncertainty is assumed to be uncorrelated between filters
        # It only affects the diagonal terms of the ZPs
        # It is estimated from repeat STIS measurements of the standard
        # AGK+81D266  Bohlin et al. 2000 AJ 120, 437 and Bohlin 1999 ISR 99-07

        # This is the most pessimistic option. We assume that only one standard was observed
        nObs = 1                 # It's been observed once
        unc_transfer = 0.003     # 0.3% uncertainty

        for i, filt1 in enumerate(filterUncertainties):
            C_K_H0[i, i] += unc_transfer**2. / nObs


        # 2b) B14 3.4.1 The uncertainty in the colour of the WD system 0.5%
        # from 3,000-10,000
        # The uncertainty is computed with respect to the Bessell B filter.
        # The Bessell B filter is the filter we use in computing the dist. modulus
        # The absolute uncertainty at the rest frame wavelength of the B band
        # is not important here, as this is absorbed into the
        # combination of the absolute B band magnitude of SNe Ia and
        # the Hubble constant.

        slope = 0.005
        waveStart = 300
        waveEnd = 1000.
        # central = 436.0 # Corresponds to B filter
        central = 555.6   # Used in the Pantheon sample

        # Note that 2.5 * log_10 (1+x) ~ x for |x| << 1
        for i, filt1 in enumerate(filterUncertainties):
            for j, filt2 in enumerate(filterUncertainties):
                if i >= j:
                    C_K_H0[i, j] += (slope / (waveEnd - waveStart) * (filt1['central']-central)) * \
                        (slope / (waveEnd - waveStart) * (filt2['central']-central))
                

        C_K_H0 = C_K_H0+C_K_H0.T-numpy.diag(C_K_H0.diagonal())


    # Write out the results
    date = JLA.get_date()
    hdu = fits.PrimaryHDU(C_K_H0)
    hdu.writeto("%s_%s.fits" % (options.output, date), clobber=True)

    return
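
# The colour-slope term in 2b above is rank one, so the i >= j loop plus the
# transpose trick is equivalent to a single outer product; a sketch with
# placeholder central wavelengths:
import numpy

slope, waveStart, waveEnd, central = 0.005, 300., 1000., 555.6
lam = numpy.array([477.0, 623.0, 762.0])   # placeholder filter central wavelengths (nm)
d = slope / (waveEnd - waveStart) * (lam - central)
C_colour = numpy.outer(d, d)               # fully correlated colour-slope term
print(C_colour)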
Example #40
def compute_Ccal(options):
    """Python program to compute Ccal
    """

    import numpy
    import astropy.io.fits as fits
    from astropy.table import Table

    import multiprocessing as mp
    import matplotlib.pyplot as plt

    # -----------  Read in the configuration file ------------

    params = JLA.build_dictionary(options.config)
    try:
        salt_prefix = params['saltPrefix']
    except KeyError:
        salt_prefix = ''

    # ---------- Read in the SNe list -------------------------

    SNeList = Table(
        numpy.genfromtxt(options.SNlist,
                         usecols=(0, 2),
                         dtype='S30,S100',
                         names=['id', 'lc']))

    for i, SN in enumerate(SNeList):
        SNeList['id'][i] = SNeList['id'][i].replace('lc-',
                                                    '').replace('.list', '')

    # ----------  Read in the SN light curve fits ------------
    # This is mostly used to get the redshifts of the SNe.
    lcfile = JLA.get_full_path(params[options.lcfits])
    SNe = Table.read(lcfile, format='fits')

    # Make sure that the order is correct
    indices = JLA.reindex_SNe(SNeList['id'], SNe)
    SNe = SNe[indices]

    # -----------  Set up the structures to handle the different salt models -------
    SALTpath = JLA.get_full_path(params['saltPath'])

    SALTmodels = JLA.SALTmodels(SALTpath + '/saltModels.list')
    nSALTmodels = len(SALTmodels) - 1
    #print SALTmodels, nSALTmodels

    nSNe = len(SNeList)
    print 'There are %d SNe in the sample' % (nSNe)
    print 'There are %d SALT models' % (nSALTmodels)

    # Add a survey column, which we use with the smoothing, and the redshift
    SNeList['survey'] = numpy.zeros(nSNe, 'a10')
    SNeList['z'] = SNe['zhel']

    # Identify the SNLS, SDSS, HST and low-z SNe. We use this when smoothing the Jacobian
    # There is probably a more elegant and efficient way of doing this

    # We need to allow for Vanina's naming convention when doing this for the photometric sample

    for i, SN in enumerate(SNeList):
        if SN['id'][0:4] == 'SDSS':
            SNeList['survey'][i] = 'SDSS'
        elif SN['id'][2:4] in ['D1', 'D2', 'D3', 'D4']:
            SNeList['survey'][i] = 'SNLS'
        elif SN['id'][0:2] == 'sn':
            SNeList['survey'][i] = 'nearby'
        else:
            SNeList['survey'][i] = 'high-z'

    # -----------   Read in the calibration matrix -----------------

    Cal = fits.getdata(JLA.get_full_path(params['C_kappa']))
    # Multiply the ZP submatrix by 100^2, and the two ZP-offset matrices by 100,
    # because the magnitude offsets are 0.01 mag and the units of the covariance matrix are mag
    Cal[0:37, 0:37] = Cal[0:37, 0:37] * 10000.
    #
    Cal[0:37, 37:] = Cal[0:37, 37:] * 100.
    Cal[37:, 0:37] = Cal[37:, 0:37] * 100.

    #print SALTpath

    # ------------- Create an area to work in -----------------------

    try:
        os.mkdir(options.workArea)
    except:
        pass

    # -----------   The lightcurve fitting --------------------------

    firstSN = True

    log = open('log.txt', 'w')

    for i, SN in enumerate(SNeList):

        J = []
        try:
            os.mkdir(options.workArea + '/' + SN['id'])
        except:
            pass

        firstModel = True
        print 'Examining SN #%d %s' % (i + 1, SN['id'])

        # Set up the number of processes
        pool = mp.Pool(processes=int(options.processes))
        results = [
            pool.apply(runSALT,
                       args=(SALTpath, SALTmodel, salt_prefix, SN['lc'],
                             SN['id'])) for SALTmodel in SALTmodels
        ]
        for result in results[1:]:
            dM, dX, dC = JLA.computeOffsets(results[0], result)
            J.extend([dM, dX, dC])
        pool.close()  # This prevents too many open files
        if firstSN:
            J_new = numpy.array(J).reshape(nSALTmodels, 3).T
            firstSN = False
        else:
            J_new = numpy.concatenate(
                (J_new, numpy.array(J).reshape(nSALTmodels, 3).T), axis=0)

        log.write('%d rows %d columns\n' % (J_new.shape[0], J_new.shape[1]))

    log.close()

    # Compute the new covariance matrix J . Cal . J.T produces a 3 * n_SN by 3 * n_SN matrix
    # J=jacobian

    J_smoothed = numpy.array(J_new) * 0.0
    J = J_new

    # We need to concatenate the different samples ...

    if options.Plot:
        try:
            os.mkdir('figures')
        except:
            pass

    if options.smoothed:
        # We smooth the Jacobian
        # We roughly follow the method described in the footnote of p13 of B14
        # Note that HST is smoothed as well.
        nPoints = {'SNLS': 11, 'SDSS': 11, 'nearby': 11, 'high-z': 11}
        for sample in ['SNLS', 'SDSS', 'nearby']:
            selection = (SNeList['survey'] == sample)
            J_sample = J[numpy.repeat(selection, 3)]

            for sys in range(nSALTmodels):
                # We need to convert to a numpy array
                # There is probably a better way
                redshifts = numpy.array(
                    [z[0] for z in SNeList[selection]['z']])
                derivatives_mag = J_sample[
                    0::3][:, sys]  # [0::3] = [0,3,6 ...] Every 3rd one
                #print redshifts.shape, derivatives_mag.shape, nPoints[sample]
                forPlotting_mag, res_mag = JLA.smooth(redshifts,
                                                      derivatives_mag,
                                                      nPoints[sample])
                derivatives_x1 = J_sample[1::3][:, sys]
                forPlotting_x1, res_x1 = JLA.smooth(redshifts, derivatives_x1,
                                                    nPoints[sample])
                derivatives_c = J_sample[2::3][:, sys]
                forPlotting_c, res_c = JLA.smooth(redshifts, derivatives_c,
                                                  nPoints[sample])

                # We need to insert the new results into the smoothed Jacobian matrix in the correct place
                # The Jacobian is a 3 * n_SN by nSALTmodels matrix
                # The rows are ordered by the mag, stretch and colour of each SN.
                J_smoothed[numpy.repeat(selection, 3),
                           sys] = numpy.concatenate(
                               [res_mag, res_x1,
                                res_c]).reshape(3, selection.sum()).ravel('F')

                # If required, make some plots as a way of checking

                if options.Plot:
                    print 'Creating plot for systematic %d and sample %s' % (
                        sys, sample)
                    fig = plt.figure()
                    ax1 = fig.add_subplot(311)
                    ax2 = fig.add_subplot(312)
                    ax3 = fig.add_subplot(313)
                    ax1.plot(redshifts, derivatives_mag, 'bo')
                    ax1.plot(forPlotting_mag[0], forPlotting_mag[1], 'r-')
                    ax2.plot(redshifts, derivatives_x1, 'bo')
                    ax2.plot(forPlotting_x1[0], forPlotting_x1[1], 'r-')
                    ax3.plot(redshifts, derivatives_c, 'bo')
                    ax3.plot(forPlotting_c[0], forPlotting_c[1], 'r-')

                    plt.savefig('figures/%s_sys_%d.png' % (sample, sys))
                    plt.close()

    date = JLA.get_date()

    fits.writeto('J_%s.fits' % (date), J, clobber=True)
    fits.writeto('J_smoothed_%s.fits' % (date), J_smoothed, clobber=True)

    # Some matrix arithmetic
    # C_cal is a nSALTmodels by nSALTmodels matrix

    # Read in a smoothed Jacobian specified in the options
    if options.jacobian != None:
        J_smoothed = fits.getdata(options.jacobian)


#    else:
#        # Replace the NaNs in your smoothed Jacobian with zero
#        J_smoothed[numpy.isnan(J_smoothed)]=0

    C = numpy.matrix(J_smoothed) * numpy.matrix(Cal) * numpy.matrix(
        J_smoothed).T
    if options.output == None:
        fits.writeto('C_cal_%s.fits' % (date), numpy.array(C), clobber=True)
    else:
        fits.writeto('%s.fits' % (options.output),
                     numpy.array(C),
                     clobber=True)

    return
def merge_lightcurve_fits(options):
    """Pythom program to merge the lightcurve fit results into a sigle format"""
    import numpy
    import astropy
    import JLA_library as JLA
    from astropy.table import Table, MaskedColumn, vstack

    params = JLA.build_dictionary(options.config)

    # ---------------- JLA ------------------------
    lightCurveFits = JLA.get_full_path(params['JLAlightCurveFits'])
    f=open(lightCurveFits)
    header=f.readlines()
    f.close()
    names=header[0].strip('#').split()

    # I imagine that the tables package in astropy could also be used to read the ascii input file
    SNeSpec = Table(numpy.genfromtxt(lightCurveFits,
                               skip_header=1,
                               dtype='S12,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8',
                               names=names))

    nSNeSpec=len(SNeSpec)
    print 'There are %d SNe from the spectroscopically confirmed sample' % (nSNeSpec)

    # Add an extra column to the table
    SNeSpec['source']=['JLA']*nSNeSpec

    # ---------------- Shuvo's sample ------------------------
    # Photometrically identified SNe in Shuvo's sample, if the parameter exists
    if params['photLightCurveFits']!='None':
        lightCurveFits = JLA.get_full_path(params['photLightCurveFits'])
        SNePhot=Table.read(lightCurveFits, format='fits')
        nSNePhot=len(SNePhot)

        print 'There are %d SNe from the photometric sample' % (nSNePhot)

        # Converting from Shuvo's names to those used by JLA
        conversion={'name':'name_adj', 'zcmb':None, 'zhel':'z', 'dz':None, 'mb':'mb', 'dmb':'emb', 'x1':'x1', 'dx1':'ex1', 'color':'c', 'dcolor':'ec', '3rdvar':'col27', 'd3rdvar':'d3rdvar', 'tmax':None, 'dtmax':None, 'cov_m_s':'cov_m_x1', 'cov_m_c':'cov_m_c', 'cov_s_c':'cov_x1_c', 'set':None, 'ra':None, 'dec':None, 'biascor':None}

        # Add the uncertainty in the mass column
        SNePhot['d3rdvar']=(SNePhot['col29']+SNePhot['col28'])/2. - SNePhot['col27']

        # Remove columns that are not listed in conversion
    
        for colname in SNePhot.colnames:
            if colname not in conversion.values():
                SNePhot.remove_column(colname)

    
        for key in conversion.keys():
            # Rename the column if it does not already exist
            if conversion[key]!=None and conversion[key]!=key:
                SNePhot.rename_column(conversion[key], key)
            elif conversion[key]==None:
                # Create it, mask it, and fill all values
                SNePhot[key]=MaskedColumn(numpy.zeros(nSNePhot), numpy.ones(nSNePhot,bool))
                SNePhot[key].fill_value=-99 # does not work as expected, so we set it explicitly in the next line
                SNePhot[key]=-99.9
            else:
                # Do nothing if the column already exists
                pass

        # Add the source column
        SNePhot['source']="Phot_Uddin"       

    # ----------------------  CfA4 ----------------------------------
    if params['CfA4LightCurveFits']!='None':
        lightCurveFits = JLA.get_full_path(params['CfA4LightCurveFits'])
        f=open(lightCurveFits)
        header=f.readlines()
        f.close()
        names=header[0].strip('#').split(',')    

        SNeCfA4=Table(numpy.genfromtxt(lightCurveFits,
                                       skip_header=1,
                                       dtype='S12,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8,f8',
                                       names=names,delimiter=','))

        nSNeCfA4=len(SNeCfA4) 
    
        conversion={'name':'name', 'zcmb':None, 'zhel':'z', 'dz':None, 'mb':'mb', 'dmb':'emb', 'x1':'x1', 'dx1':'ex1', 'color':'c', 'dcolor':'ec', '3rdvar':None, 'd3rdvar':None, 'tmax':None, 'dtmax':None, 'cov_m_s':'cov_m_x1', 'cov_m_c':'cov_m_c', 'cov_s_c':'cov_x1_c', 'set':None, 'ra':None, 'dec':None, 'biascor':None}

        # Remove columns that are not listed in conversion
    
        for colname in SNeCfA4.colnames:
            if colname not in conversion.values():
                SNeCfA4.remove_column(colname)
    
        for key in conversion.keys():
            # Rename the column if it does not already exist
            if conversion[key]!=None and conversion[key]!=key:
                SNeCfA4.rename_column(conversion[key], key)
            elif conversion[key]==None:
                # Create it, mask it, and fill all values
                SNeCfA4[key]=MaskedColumn(numpy.zeros(nSNeCfA4), numpy.ones(nSNeCfA4,bool))
                SNeCfA4[key].fill_value=-99 # does not work as expected, so we set it explicitly in the next line
                SNeCfA4[key]=-99.9
            else:
                # Do nothing if the column already exists
                pass

        # Add the source column
        SNeCfA4['source']="CfA4"   

    try:
        SNe=vstack([SNeSpec,SNePhot,SNeCfA4])
    except:
        SNe=SNeSpec

    # Write out the result as a FITS table
    date = JLA.get_date()
    SNe.write('%s_%s.fits' % (options.output, date), format='fits')

    return
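
# A minimal sketch of the rename-or-create pattern used above, on a toy
# two-row table; a value of None in the mapping means "create a masked
# placeholder column set to -99.9".
import numpy
from astropy.table import Table, MaskedColumn

t = Table({'z': [0.1, 0.2], 'mb': [23.1, 24.0]})
conversion = {'zhel': 'z', 'mb': 'mb', 'zcmb': None}
for key in conversion.keys():
    if conversion[key] is not None and conversion[key] != key:
        t.rename_column(conversion[key], key)
    elif conversion[key] is None:
        t[key] = MaskedColumn(numpy.zeros(len(t)), mask=numpy.ones(len(t), bool))
        t[key] = -99.9
print(t)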
def create_Models(options):
    import os

    params=JLA.build_dictionary(options.config)

    try:
        os.mkdir(options.output)
    except:
        print "Directory %s already exists" % (options.output)

    # Read in the SALT models that will be kept
    SALTmodels=Table.read(options.modelList,format='ascii',names=['ID','Type','Instrument','ShortName','fitmodel','MagSys','Filter'],data_start=0)

    modelList=[]
    # Go through the base models
    for model in os.listdir(JLA.get_full_path(options.base)):
        if model in SALTmodels['ID']:

            selection=(SALTmodels['ID']==model)
            modelSel=SALTmodels[selection]

            print "Copying across %s" % model
            modelList.append(model)
            shutil.copytree(options.base+'/'+model,options.output+'/'+model)
            # Copy salt2 directory to salt2-4
            shutil.copytree(options.output+'/'+model+'/snfit_data/salt2',options.output+'/'+model+'/snfit_data/salt2-4')
            # Update fitmodel.card
            shutil.copy(JLA.get_full_path(params['fitmodel']),options.output+'/'+model+'/snfit_data/fitmodel.card')
            # Add the DECam instrument files
            shutil.copytree(JLA.get_full_path(params['DES_instrument']),options.output+'/'+model+'/snfit_data/Instruments/DECam')

            # Update the Keplercam instrument files
            # We added the revised filter curves B,V,r, and i, and Bc, Vc, rc, and ic
            shutil.rmtree(options.output+'/'+model+'/snfit_data/Instruments/Keplercam')
            shutil.copytree(JLA.get_full_path(params['CfA_instrument']),options.output+'/'+model+'/snfit_data/Instruments/Keplercam')

            # Since we overwrite the Keplercam instrument files, we need to offset the filter curves
            if modelSel['Type']=='Filter' and modelSel['Instrument']==['KEPLERCAM']:
                print 'Adjusting filter for model %s' % modelSel['ID'][0]
                offsetFilter(options.output+'/'+modelSel['ID'][0]+'/snfit_data/'+modelSel['fitmodel'][0]+'/'+modelSel['Filter'][0],modelSel['Instrument'][0])

            # Add DES magnitude system
            shutil.copy(JLA.get_full_path(params['DES_magsys']),options.output+'/'+model+'/snfit_data/MagSys/')

            # Update the CfA and CSP magnitude systems
            shutil.copy(JLA.get_full_path(params['CfA_magsys']),options.output+'/'+model+'/snfit_data/MagSys/')

            # Since we update the magnitude systems, we need to offset the ZPs for KEPLERCAM, 4SHOOTER, and SWOPE
            if modelSel['Type']=='ZP' and modelSel['Instrument'] in ['KEPLERCAM','4SHOOTER2','SWOPE2']:
                print 'Adjusting ZP for model %s' % modelSel['ID'][0]
                offsetZP(options.output+'/'+modelSel['ID'][0]+'/snfit_data/MagSys/'+modelSel['MagSys'][0],modelSel['ShortName'][0],modelSel['Instrument'][0],modelSel['fitmodel'][0])
        else:
            print "Excluding %s" % model


    print 'We start with %d models from JLA' % (len(modelList))

    # ---------  Add new models --------------

    newModels=Table.read(options.add,format='ascii', comment='#')
    for model in newModels:
        # Copy across the base model
        shutil.copytree(JLA.get_full_path(model['baseModel']),options.output+'/'+model['modelNumber'])
        print 'Creating %s' % (model['modelNumber'])

        # Copy salt2 directory to salt2-4
        shutil.copytree(options.output+'/'+model['modelNumber']+'/snfit_data/salt2',options.output+'/'+model['modelNumber']+'/snfit_data/salt2-4')

        # Remove the old base instrument, if it exists, and replace it with a new one
        try:
            shutil.rmtree(options.output+'/'+model['modelNumber']+'/snfit_data/'+model['fitmodel'])
        except:
            pass

        shutil.copytree(JLA.get_full_path(model['baseInstrument']+model['fitmodel']),options.output+'/'+model['modelNumber']+'/snfit_data/'+model['fitmodel'])
        print JLA.get_full_path(model['baseInstrument']+model['fitmodel']),options.output+'/'+model['modelNumber']+'/snfit_data/'+model['fitmodel']

        # Remove the old MagSys directory and replace it with the new one
        shutil.rmtree(options.output+'/'+model['modelNumber']+'/snfit_data/MagSys')
        shutil.copytree(JLA.get_full_path(model['baseInstrument'])+'MagSys',options.output+'/'+model['modelNumber']+'/snfit_data/MagSys')
        
        # Replace the fitmodel.card with the new one
        shutil.copy(JLA.get_full_path(model['baseInstrument'])+'fitmodel.card',options.output+'/'+model['modelNumber']+'/snfit_data/fitmodel.card')

        # Modify filter curve and ZP
        if model['Type']=='filt':
            offsetFilter(options.output+'/'+model['modelNumber']+'/snfit_data/'+model['fitmodel']+'/'+model['Filter'],model['Instrument'])
        else:
            offsetZP(options.output+'/'+model['modelNumber']+'/snfit_data/MagSys/'+model['MagSys'],model['ShortName'],model['Instrument'],model['fitmodel'])


        # We now update the list of instruments in the newly created surfaces
        # This code is not clear
        if model['Instrument']=='DECAM':
            # Update just the Keplercam instrument files
            # There is no need to update Swope2, as the new filters are in the base model
            updateKeplercam(options,params,model)
        elif model['Instrument']=='KEPLERCAM':
            # Update just the DES instrument files
            # There is no need to update Swope2, as the new filters are in the base model
            updateDES(options,params,model)
        else: # The case for swope ...
            # Update both the Keplercam and DES files
            updateDES(options,params,model)
            updateKeplercam(options,params,model)




        modelList.append(model['modelNumber'])

    print 'We now have %d models' % (len(modelList))
    
    # ---- Copy across the saltModels.list ----

    shutil.copy(options.modelList,options.output+'/saltModels.list')
    

    return