Example #1
import argparse

import numpy as np
import pyphot

# __version__ is assumed to be defined at module level for the --version flag.


def main():
    description = "TBD"
    epilog = "TBD"
    arg_parser = argparse.ArgumentParser(description=description,
                                         epilog=epilog)
    arg_parser.add_argument('-v',
                            '--version',
                            action='version',
                            version='%(prog)s ' + __version__)
    arg_parser.add_argument("-f",
                            metavar="FILTER",
                            dest='filter',
                            default="GROUND_JOHNSON_V")
    arg_parser.add_argument("spectra",
                            metavar="FILE",
                            nargs='*',
                            help="spectrum data")
    args = arg_parser.parse_args()

    lib = pyphot.get_library()
    f = lib[args.filter]

    for spectrum_file in args.spectra:
        spectrum_data = np.loadtxt(spectrum_file, unpack=True, dtype='float')

        passband_flux = f.get_flux(spectrum_data[0, :], spectrum_data[1, :])
        mag = -2.5 * np.log10(passband_flux) - f.Vega_zero_mag
        print(mag)
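# A minimal, self-contained sketch of the same flux-to-Vega-magnitude step on a
# synthetic flat spectrum. The wavelength grid (Angstrom) and flux level
# (erg/s/cm**2/AA) are assumptions for illustration, not part of the script above.
import numpy as np
import pyphot

wave = np.linspace(3000.0, 9000.0, 1000)   # Angstrom (assumed)
flux = np.full_like(wave, 1e-14)           # erg/s/cm**2/AA (assumed)

lib = pyphot.get_library()
f = lib['GROUND_JOHNSON_V']

# Integrate through the passband, then convert to a Vega magnitude as above.
passband_flux = f.get_flux(wave, flux)
print(-2.5 * np.log10(passband_flux) - f.Vega_zero_mag)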
Example #2
    def photometry(self, band):
        """
        Evaluate photometry in a named photometric band, using pyphot.

        :param band:
            Name of photometric band, as recognised by pyphot. See
            <http://mfouesneau.github.io/docs/pyphot/libcontent.html> for a list of recognised bands.

        :type band:
            str

        :return:
            float AB magnitude
        """

        import pyphot, tables
        lib = pyphot.get_library()

        try:
            photometer = lib[band]
        except tables.exceptions.NoSuchNodeError:
            logger.error("Could not find photometric band <{}>".format(band))
            raise

        wavelengths = self.wavelengths * pyphot.unit['AA']
        fluxes = self.values * pyphot.unit['erg/s/cm**2/AA']

        flux = photometer.get_flux(slamb=wavelengths, sflux=fluxes)
        mag = -2.5 * np.log10(flux) - photometer.AB_zero_mag
        return mag
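# A hedged sketch of a minimal object this method could live on. The `Spectrum`
# class, its attribute units (Angstrom, erg/s/cm**2/AA) and the band name in the
# usage note are assumptions that mirror what photometry() accesses above.
import numpy as np
import pyphot

class Spectrum:
    """Hypothetical container exposing the attributes used by photometry()."""

    def __init__(self, wavelengths, values):
        self.wavelengths = np.asarray(wavelengths)  # assumed Angstrom
        self.values = np.asarray(values)            # assumed erg/s/cm**2/AA

    def photometry(self, band):
        lib = pyphot.get_library()
        photometer = lib[band]
        wavelengths = self.wavelengths * pyphot.unit['AA']
        fluxes = self.values * pyphot.unit['erg/s/cm**2/AA']
        flux = photometer.get_flux(slamb=wavelengths, sflux=fluxes)
        return -2.5 * np.log10(flux) - photometer.AB_zero_mag

# Usage (assumed band name): Spectrum(wave, flux).photometry('SDSS_g')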
Example #3
def library(lamb, spectra, filt, band='g', get_sun='N'):

    # Internal default library of passband filters
    lib = pyphot.get_library()

    # Defining the filter band library
    f = lib[filt + '_' + band]

    if get_sun == 'Y':
        sun_obs = pyphot.Sun(flavor='observed')  # Getting the solar spectrum
        wave_sun = sun_obs.wavelength.magnitude
        spec_sun = sun_obs.flux.magnitude

        BC_sun = transmission(wave_sun, spec_sun, band)

        # Getting the Sun absolute magnitude
        fluxes = f.get_flux(wave_sun, spec_sun)

        # Convert to vega magnitudes
        mags = -2.5 * np.log10(fluxes.value) - f.Vega_zero_mag
        dist = 1.49597871e11 / (3.0857e16)  # Astronomical unit in parsec
        M_sun = mags - 5.0 * np.log10(
            dist / 10.0)  # Absolute magnitude of the Sun

        return BC_sun, M_sun

    else:
        # Compute the integrated flux through the filter f
        # note that it works on many spectra at once
        fluxes = f.get_flux(lamb, spectra)

        # Convert to vega magnitudes
        mags = -2.5 * np.log10(fluxes.value) - f.Vega_zero_mag

        return mags, fluxes
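# A hedged usage sketch for the get_sun='N' branch of library() on a synthetic flat
# spectrum; the wavelength grid (Angstrom) and flux level are arbitrary test values,
# and the GROUND_JOHNSON prefix follows the filter naming used in these examples.
# (The get_sun='Y' branch also needs the transmission() helper, which is not shown.)
import numpy as np

lamb = np.linspace(3000.0, 9000.0, 2000)
spectra = np.full_like(lamb, 1e-14)

mags, fluxes = library(lamb, spectra, filt='GROUND_JOHNSON', band='V', get_sun='N')
print(mags)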
Example #4
def get_effective_wavelength(band):
    """Get central wavelength of a specific filter in um."""
    # Load photometry filter library
    filt = pyphot.get_library()[band]
    # Get central wavelength in um
    leff = filt.cl.to('um').magnitude
    return leff
def filter_logFlux(survey_name, filter_names, wavelength, wavelength_um, flux,
                   len_data):
    wavelength_nonum = wavelength_um
    lib = pyphot.get_library()

    obj_list = list(range(0, len_data, 1))
    plt.figure()
    filter_logflux_ = {}
    filters_clWL_ = {}
    #     print("len_obj_list", len(obj_list))

    for objects in obj_list:
        filters = lib.load_filters(
            filter_names, lamb=wavelength[objects])  # * u.aa)#*wl_unit)

        mags = []
        mags_flux = []
        filter_logflux_[objects] = []
        filters_clWL_[objects] = []
        filters_effWL = []
        filters_clWL = []
        for name, fn in zip(filter_names, filters):
            flux0 = fn.get_flux(wavelength[objects], flux[objects])
            #             print("flux0",flux0)

            filters_effWL.append(fn.leff.magnitude)  #,casting="unsafe")
            filters_clWL.append(fn.cl.magnitude * 1e+4)
            f = flux0  # - ABf
            #             print("f",f)
            filters_clWL_[objects].append(filters_clWL)
            filter_logflux_[objects].append(np.log10(f) + 19)
#             print("filter_logflux_",filter_logflux_)
#     print("filters_clWL_",filters_clWL_)

    return filter_logflux_, filters_clWL_
Example #6
def get_bandpass(band):
    """Get the bandpass of a specific filter in um."""
    # Load photometry filter library
    filt = pyphot.get_library()[band]
    # Get lower and upper bandpass in um
    width = filt.width.to('um').magnitude
    bp = width
    return bp / 2
Example #7
def get_band_info(band):
    """Look for the filter information in the pyphot library of filters."""
    # TODO: rename?
    # Load photometry filter library
    filt = pyphot.get_library()[band]
    # Get Vega zero flux in erg / cm2 / s / um
    f0 = filt.Vega_zero_flux.to('erg/(um * cm ** 2 * s)').magnitude
    return f0
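# A brief usage sketch for the three small filter-property helpers above
# (get_effective_wavelength, get_bandpass, get_band_info), assuming those
# definitions are in scope; '2MASS_J' is just an example band name.
band = '2MASS_J'
print(get_effective_wavelength(band))  # central wavelength in um
print(get_bandpass(band))              # half of the filter width in um
print(get_band_info(band))             # Vega zero flux in erg / (um cm**2 s)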
Example #8
def measure_mags(wave, flux, filts = ['GROUND_JOHNSON_B','GROUND_JOHNSON_V']):
    lib = pyphot.get_library()
    mags = []
    for f in filts:
        func = lib[f]
        fflux = func.get_flux(wave, flux, axis = -1)
        mag = -2.5 * np.log10(fflux) - func.Vega_zero_mag
        mags.append(mag)
    return mags
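# A quick hedged sketch of calling measure_mags() on a synthetic flat spectrum to
# get a B-V colour with the default Johnson filters; the wavelength grid and flux
# level are arbitrary test values.
import numpy as np

wave = np.linspace(3500.0, 7500.0, 2000)  # Angstrom, spans the B and V passbands
flux = np.full_like(wave, 1e-15)

mag_b, mag_v = measure_mags(wave, flux)
print('B-V =', mag_b - mag_v)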
Example #9
def scale_flux_to_photometry(spec, valid_bands):
    lib = pyphot.get_library()
    band_dict = {
        'U': 'GROUND_JOHNSON_U',
        'B': 'GROUND_JOHNSON_B',
        'V': 'GROUND_JOHNSON_V',
        'R': 'GROUND_COUSINS_R',
        'I': 'GROUND_COUSINS_I',
        'u': 'SDSS_u',
        'g': 'SDSS_g',
        'r': 'SDSS_r',
        'i': 'SDSS_i',
        'z': 'SDSS_z'
    }

    if len(valid_bands) > 0:
        mags_from_phot = generate_photometry_for_epoch(spec, valid_bands)

        valid_mjds = []
        for band in mags_from_phot:
            if ~np.isnan(mags_from_phot[band]):
                valid_mjds.append(band)
        if len(valid_mjds) > 0:
            filts = {}
            for b in valid_mjds:
                filts[b] = lib[band_dict.get(b)]

            guess = 1.e-14
            scale = opt.minimize(total_phot_offset,
                                 guess,
                                 args=(spec, filts, mags_from_phot),
                                 method='Nelder-Mead').x
            scale = scale[0]

            spec_phots = {}
            for b in valid_mjds:
                # final_flux = filts[b].get_flux(spec.wavelength[spec.x1:spec.x2], scale*spec.flux[spec.x1:spec.x2], axis = -1)
                final_flux = get_filter_flux(
                    spec.wavelength[spec.x1:spec.x2],
                    scale * spec.flux[spec.x1:spec.x2], filts[b])
                final = -2.5 * np.log10(final_flux) - filts[b].Vega_zero_mag
                spec_phots[b] = final
            print(scale, spec_phots, mags_from_phot)  # for testing output
        else:
            scale = np.nan
            print scale, "mjd of spectrum outside photometric coverage"
    else:
        scale = np.nan
        mags_from_phot = None

    return scale, mags_from_phot
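# The objective minimised above (total_phot_offset) and the helpers
# generate_photometry_for_epoch/get_filter_flux are not shown in this example.
# Below is a hypothetical sketch of an objective consistent with how it is called
# (scale first, then spec, the filter dict and the target magnitudes): the summed
# squared difference between synthetic and observed magnitudes.
import numpy as np

def total_phot_offset(scale, spec, filts, mags_from_phot):
    """Hypothetical objective: total squared magnitude offset for a given flux scale."""
    offset = 0.0
    for b, filt in filts.items():
        synth_flux = filt.get_flux(spec.wavelength[spec.x1:spec.x2],
                                   scale * spec.flux[spec.x1:spec.x2])
        synth_mag = -2.5 * np.log10(synth_flux) - filt.Vega_zero_mag
        offset += (synth_mag - mags_from_phot[b]) ** 2
    return offset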
Example #10
def makegrid(infile = 'filters.csv', libraryFile = 'filters.hd5'):
    """Compute GRAMS synthetic photometry in all the bands specified in infile, using the information
    from the filter library.
    INPUTS
       1) infile: a two-column CSV file whose second column must contain
       the names of the SVO/VOSA filter files to download. The first 
       column is not currently used, but can contain identifying information for 
       each filter that connects it back to the data.
       The filter names can be in the order of occurrence in the data, and may include
       repetitions (as it is quite possible that the data is compiled from a number of
       differing sets of observations).
       NOTE: this file must have a one-line header.

       2) libraryFile: the name of the hdf5 filter library to use.
    """
    filters_used = Table.read(infile, format = 'csv', names = ('column', 'filterName'))
    filterLibrary = pyp.get_library(fname = libraryFile)
    filterNames = [f['filterName'].replace('/','_') for f in filters_used]
    chemtype = ['o', 'c']
    #Links to the grid files on Google Drive. Is there a more elegant solution?
    file_link = {'o': 'https://ndownloader.figshare.com/files/9684331', \
                 'c': 'https://ndownloader.figshare.com/files/9684328'}
    for c in chemtype:
        gridfile = 'grams_' + c + '.fits'
        if os.path.isfile(gridfile):
            subprocess.call(['rm', gridfile])
        grid, header = fits.getdata(file_link[c], 1, header = True)
        #The original FITS_rec object is turned into an astropy Table for manipulation.
        #   It is then turned into a HDU object for output.
        grid = Table(grid) #conversion step 1
        print("Renaming 'MLR' column to 'DPR'")
        grid.rename_column('MLR', 'DPR') #Changing MLR column name to DPR
        inlam = grid[0]['Lspec']
        infnu = grid['Fspec']
        filters = filterLibrary.load_filters(filterNames, interp = True, lamb = inlam * pyp.unit['micron'])
        _, seds = pyp.extractSEDs(inlam, infnu, filters, Fnu=True, absFlux=False)
        filters_used['lpivot'] = np.array([f.lpivot.magnitude for f in filters])
        del grid['Fphot']
        grid['Fphot'] = seds
        #Update the magnitudes as well
        zp = np.array([f.Vega_zero_Jy.magnitude for f in filters])
        del grid['mphot']
        grid['mphot'] = -100**(1/5.0) * np.log10(grid['Fphot'] / np.repeat(zp[np.newaxis, :], len(grid), axis = 0))
        g = fits.table_to_hdu(grid) #conversion step 2
        g.header = editgridheader(header, grid, filters_used)
        g.writeto(gridfile, overwrite = True)
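# A hedged sketch of an input file matching the makegrid() docstring: a one-line
# header, an identifier column and a column of SVO/VOSA filter names. The specific
# filter identifiers below are illustrative assumptions.
from astropy.table import Table

rows = [('J_band', '2MASS/2MASS.J'),
        ('K_band', '2MASS/2MASS.Ks'),
        ('IRAC1', 'Spitzer/IRAC.I1')]
Table(rows=rows, names=('column', 'filterName')).write('filters.csv', format='csv', overwrite=True)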
Example #11
def synthphot(inlam, influx, filtlist='SPITZER_IRAC_36', fnu=True, **kwargs):
    """
    Function to compute synthetic photometry using pyphot routines.  
    
    For now, assumes that INLAM is in micron
    FILTLIST is the name of the filter to use. The script should check whether it exists in the library.

    Inputs:
        inlam    : wavelength grid, assumed to be in micron
        influx   : input flux on the inlam grid
        filtlist : name of the filter to use (default 'SPITZER_IRAC_36')
        fnu      : if True, treat influx as F_nu and rescale it by (lpivot/lambda)**2 before integrating

    Output:
        out : flux integrated through the filter passband
    """
    lib = pyphot.get_library()
    f = lib[filtlist]  # use the requested filter (a single name; extend to lists if needed)
    if fnu:
        out = f.get_flux(1e4 * inlam, influx * (f.lpivot._magnitude / (inlam * 1e4))**2)
    else:
        out = f.get_flux(1e4*inlam, influx)
    return out
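# A hedged usage sketch for synthphot() with a wavelength grid in micron (as the
# docstring assumes); the flux values are arbitrary and fnu=False keeps the input
# as a flux density per unit wavelength.
import numpy as np

inlam = np.linspace(3.0, 4.2, 500)   # micron, roughly covering IRAC 3.6
influx = np.full_like(inlam, 1e-15)

print(synthphot(inlam, influx, fnu=False))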
Example #12
    def pyphotSetup(self, libName=None, **kwargs):
        ''' Given the data, read in the pyphot filter library and make sure we have the right list of filters in memory 

        Parameters
        ----------
        libName : str, optional
            The name of the filter library to use

        Notes
        ------
        Future work: go through multiple libraries from different (user-defined) locations and import them all
        '''

        if libName is None:
            print("No library given, using default pyphot filters")
            # Build the path to the filter libraries bundled with pyphot
            # (os is assumed to be imported at module level).
            libDir = os.path.join(os.path.dirname(pyphot.__file__), 'libs')
            libName = os.path.join(libDir, 'synphot_nonhst.hd5')  # PhIReSSTARTer.hd5

        self.filterLibrary = pyphot.get_library(fname=libName)
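# A small follow-up sketch, assuming `fitter` is an already-constructed instance of
# the (hypothetical) class that defines pyphotSetup(); after the call, the loaded
# library can be inspected with get_library_content(), as in Example #14 below.
fitter.pyphotSetup()  # falls back to the bundled pyphot filters when no library is given
print(fitter.filterLibrary.get_library_content()[:10])  # first ten filter names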
Example #13
def measure_comp_1m2(comps, filts = ['GROUND_JOHNSON_B','GROUND_JOHNSON_V'], boot_arrs = None, error=False):
    lib = pyphot.get_library()
    func1 = lib[filts[0]]
    func2 = lib[filts[1]]
    
    comp_1 = []
    comp_2 = []
    phases = []
    errors = []
    kpora.set_min_num_spec(comps, 5)
    for comp in comps:
        mag1, mag2 = measure_mags(comp.wavelength[comp.x1:comp.x2], comp.flux[comp.x1:comp.x2], filts = filts)
        comp_1.append(mag1)
        comp_2.append(mag2)
        phases.append(np.average(comp.phase_array[comp.x1:comp.x2]))
        
    if error:
        boot_1m2s = []
        errors = []
        for boots in boot_arrs:
            boot_1 = []
            boot_2 = []
            for b in boots:
                mag1, mag2 = measure_mags(b.wavelength[b.x1:b.x2], b.flux[b.x1:b.x2], filts=filts)
                if ~np.isnan(mag1) and ~np.isnan(mag2):
                    boot_1.append(mag1)
                    boot_2.append(mag2)
            diff = np.asarray(boot_1) - np.asarray(boot_2)
            boot_1m2s.append(diff)
        low_errors = []
        up_errors = []
        for clist in boot_1m2s:
            p = np.percentile(clist, [18, 50, 82])
            clower = p[1] - p[0]
            cupper = p[2] - p[1]
            low_errors.append(cupper)
            up_errors.append(clower)
        errors = [low_errors, up_errors]
        
    return phases, comp_1, comp_2, errors
Example #14
import pyphot #synthetic photometry
from astropy.table import Table #synthetic photometry
import numpy as np #input/output
from astropy.io import fits #input/output
from matplotlib import pyplot as plt #visualisation
pkgdir = '/usr/local/lib/python2.7/site-packages/pyphot/' #Edit this depending on user
libsdir = '/usr/local/lib/python2.7/site-packages/pyphot/libs/' #Edit this depending on user
demodir = '/Users/sundar/work/pyphot/pyphot/demo/' #Edit this depending on user

"""Load entire HDF5 library"""
libraryName=pkgdir+'libs/synphot_PhIReSSTARTer.hd5'
filterLibrary = pyphot.get_library(fname=libraryName)

"""Names of the first ten filters available in the library"""
filterNames = filterLibrary.get_library_content()
for i in range(10):
    print(filterNames[i])

"""Load information for a single filter"""
ans_3300 = filterLibrary.load_filters(['ans_3300']) #result is a list, even for a single filter!
"""Load information for a list of filters"""
filters = filterLibrary.load_filters(['ans_3300', 'steward_k'])

"""View filter information"""
ans_3300[0].info()

"""We will use the same set of filters for both examples below"""
filterNames = ['MCPS_U', 'MCPS_B', 'MCPS_V', 'MCPS_I', '2MASS_J', '2MASS_H', '2MASS_Ks',
               'SPITZER_IRAC_36', 'SPITZER_IRAC_45', 'SPITZER_IRAC_58', 'SPITZER_IRAC_80',
               'SPITZER_MIPS_24']
libraryName = libsdir+'synphot_PhIReSSTARTer.hd5'
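# A hedged sketch of how the filterNames and libraryName defined above would
# typically be used, following the load_filters pattern from Example #10; the
# micron wavelength grid is an assumption for illustration.
lamb = np.linspace(0.3, 30.0, 2000)  # micron (assumed grid)
filterLibrary = pyphot.get_library(fname=libraryName)
filters = filterLibrary.load_filters(filterNames, interp=True,
                                     lamb=lamb * pyphot.unit['micron'])
for f in filters:
    print(f.lpivot)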
from Image_Rebin import Image_Rebin


"""

Main image convolution script. 

Step 1: Image_FilterConvolve function - Convolving the four wavelength images output by RT modelling with the filter profiles of their instruments.

Step 2: Image_BeamConvolve function - Convolving the filter-convolved images with the beams of each instrument.

"""

Source_Distance = 268 * pc #Units = pc
OutPutFiles = ["Model_FourShells_EvenMass.rtout", "Model_FourShells_UnEvenMass.rtout", "Model_InnerShell_Only.rtout", "Model_OuterShell_Only_K2010.rtout", "Model_OuterShell_Only_M2010.rtout", "Model_ShellThree_Only.rtout", "Model_ShellTwo_Only.rtout"] #RT output files
Filter_Library = pyphot.get_library(fname="Ampere_FiterProfile_Library.hdf5") #Getting Instrument Filters for Filter Convolution of the SED

#Filter names of the image wavelengths as given in the Ampere filter profiles library.
#'HERSCHEL_PACS_BLUE', 'HERSCHEL_PACS_RED', 'JCMT_SCUBA2_450', 'JCMT_SCUBA2_850'


#Needed for Beam Conv
model_max_envelope_size = 80 #Values used in RT modelling
model_arcsec_size = model_max_envelope_size * 3 #Values used in RT modelling
model_pix_size = 400 #Values used in RT modelling -  (400 x 400 pixels)
model_pix_arcsec = model_arcsec_size / model_pix_size #Size of each pixel in model image in arcsecs


for filename in OutPutFiles:

    model = ModelOutput(filename)
Example #16
def filter_plot(survey_name, wavelength, wavelength_um, flux, index_list):
    wavelength=wavelength_um
    wavelength_nonum=wavelength_um
    df = pd.read_csv("pyphot/table.csv")
    table = df[df["name"].str.contains(survey_name)]

    ###drop 150w2 from JWST
    a = ["JWST_NIRCAM_F150W2"]
    table = table[~table['name'].isin(a)]

    ###sort the table
    table = table.sort_values(by='effective wavelength')
    filter_names = list(table["name"])
    lib = pyphot.get_library()

    obj_list=index_list
    plt.figure(figsize=(8,6))
    for objects in obj_list:
        filters = lib.load_filters(filter_names, lamb=wavelength[objects])# * u.aa)#*wl_unit)

        mags=[]
        mags_flux=[]
        filter_flux=[]
        filters_clWL=[]
        for name, fn in zip(filter_names, filters):
            flux0=fn.get_flux(wavelength[objects], flux[objects])
            filters_clWL.append(fn.cl.magnitude *1e+4)
            f= flux0 #- ABf
            filter_flux.append(f)




##########FIRST plot---------------------------

        plt.scatter(filters_clWL , filter_flux, s=10, marker='o')#, label=survey_name)
        # plt.plot(filters_clWL, np.log(filter_flux))
        plt.xlabel(r"$\lambda (\AA)$")
        plt.xlim(3000,24500)
        # plt.xscale("log")
        plt.ylabel("Flux "+r"($erg s^{-1} cm^{-2} Hz^{-1}$)")
        plt.ylim(1e-21, 1e-13)
        plt.yscale("log")
        plt.title(str(survey_name))




    plt.savefig("Photometric_SamplePlot/Flux_filter_continuum"+str(survey_name)+".png")


    ############----------------------------log flux plots
    plt.figure(figsize=(12, 8.5))
    
    for objects in obj_list:
        filters = lib.load_filters(filter_names, lamb=wavelength[objects])# * u.aa)#*wl_unit)
#         print("filters", filters)



        mags=[]
        mags_flux=[]
        filter_flux=[]
        filters_clWL=[]
        for name, fn in zip(filter_names, filters):
            flux0=fn.get_flux(wavelength[objects], flux[objects])
            filters_clWL.append(fn.cl.magnitude * 1e+4)
            f= flux0 #- ABf
            filter_flux.append(f)
            
        plt.scatter(wavelength[objects] * 1e+4, np.log10(flux[objects])+19 , s=5, marker=',') 
        plt.scatter(filters_clWL, np.log10(filter_flux) + 19, s=20, label=survey_name)
        
        plt.xlim(3000,24500)
        plt.xscale('log')
        plt.xticks(np.arange(3000,24000, step=1000), rotation=30)#, size=12)
        plt.yticks(size=18)
        plt.xlabel(r"$\lambda (\AA)$",size=18)
        plt.ylabel("log Flux "+r"($erg s^{-1} cm^{-2} Hz^{-1}$) + 19", size=20)

    plt.title(str(survey_name), size=20)
    plt.savefig("Photometric_SamplePlot/logFlux_filter"+str(survey_name)+".png")
Example #17
def ssp_model(Z,
              feh=None,
              afe=None,
              age=None,
              imf=None,
              slope=None,
              fwhm=0.2,
              dl=0.1,
              CFe=0.0,
              CFe_rgb=None,
              NFe=0.0,
              NFe_rgb=None,
              OFe=None,
              MgFe=None,
              SiFe=None,
              CaFe=None,
              TiFe=None,
              NaFe=0.0,
              AlFe=0.0,
              BaFe=0.0,
              EuFe=0.0,
              n_ms=None,
              n_rg=None,
              logg_cn=3,
              parfile=None,
              iso='DARTMOUTH'):

    #---------------------------------
    # CHECKING INPUTS
    #---------------------------------
    if feh is None:
        feh = float(input('Need to specify [Fe/H]: '))
    if afe is None:
        afe = float(input('Need to specify [alpha/Fe]: '))
    if age is None:
        age = float(input('Need to specify population age in Gyr: '))
    if imf is None:
        imf = input('Need to specify IMF (Salpeter, Kroupa, Unimodal): ')
    imf = imf.lower()

    if imf == 'unimodal' and slope is None:
        print('IMF = unimodal, you need to specify a slope')

    if n_ms is None and n_rg is None and parfile is None:
        print('FILE WITH STELLAR PARAMETERS --> ???')
        print(' Need to specify n_ms & n_rg OR parfile')
    elif n_ms is not None and n_rg is not None:
        parsfile = stpars.set_stpars_filename(n_ms, n_rg, feh, afe, age)
    else:
        parsfile = parfile

    #---------------------------------
    # SETTING DEFAULT VALUES
    #---------------------------------
    if CFe_rgb is None:
        CFe_rgb = CFe
    if NFe_rgb is None:
        NFe_rgb = NFe
    if OFe is None:
        OFe = afe
    if MgFe is None:
        MgFe = afe
    if SiFe is None:
        SiFe = afe
    if CaFe is None:
        CaFe = afe
    if TiFe is None:
        TiFe = afe

    #---------------------------------
    # OUTPUT FILE NAMES
    #---------------------------------
    if not os.path.isdir('SSP_Spectra'):
        os.makedirs('SSP_Spectra')

    file_ssp = set_ssp_filename(feh, afe, age, imf, slope, CFe, CFe_rgb, NFe,
                                NFe_rgb, OFe, MgFe, SiFe, CaFe, TiFe, NaFe,
                                AlFe, BaFe, EuFe)
    file_fig = file_ssp + '.jpg'
    logfile = file_ssp + '.log'

    #---------------------------------
    # WRITE INFO IN LOGFILE
    #---------------------------------
    log = open(logfile, 'w+')

    log.write(
        '----------------------- INPUT PARAMETERS -----------------------\n')
    log.write(
        '----------------------------------------------------------------\n')

    log.write('System time:  ' + datetime.now().strftime('%Y-%m-%d %H:%M:%S') +
              '\n')

    temp = '%-26s%8.2f%4s' % ('Isochrone age: ', age, 'Gyr')
    log.write(temp + '\n')
    temp = '%-26s%8.2f%2s' % ('FWHM: ', fwhm, ' A')
    log.write(temp + '\n')

    if imf == 'unimodal':
        temp = '%-26s%8s%8s%8.2f' % ('IMF: ', imf, ' slope: ', slope)
    else:
        temp = '%-26s%8s' % ('IMF: ', imf)
    log.write(temp + '\n')

    log.write('Abundances:\n')
    temp = '%8s%8s%8s%8s%8s%8s%8s%8s%8s%8s' % (
        '[Fe/H]', '[a/Fe]', '[C/Fe]', '[N/Fe]', '[O/Fe]', '[Mg/Fe]', '[Si/Fe]',
        '[Ca/Fe]', '[Ti/Fe]', '[Na/Fe]')
    log.write(temp + '\n')

    temp = '%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f%8.2f' % (
        feh, afe, CFe, NFe, OFe, MgFe, SiFe, CaFe, TiFe, NaFe)
    log.write(temp)

    #---------------------------------
    # CONSTANTS
    #---------------------------------
    Lsun = 3.839e33  # erg s-1
    pc = 3.086e18  # parsec in cm
    G = 6.67300e-11  # m3 kg-1 s-2
    Msun = 1.9891e33  # g
    sb = 5.67e-5  # erg cm-2 s-1 K-4
    Rsun = np.sqrt(Lsun /
                   (4 * np.pi * sb * 5777**4)) * (10**(-2))  # = 6.955e10 cm
    aSun = 4 * np.pi * Rsun**2  # cm2

    #---------------------------------
    # READ INPUTS FILE
    #---------------------------------
    t = ascii.read(parsfile)

    nstars = len(t)

    Teffs = np.zeros(nstars)
    loggs = np.zeros(nstars)
    masses = np.zeros(nstars)
    lumis = np.zeros(nstars)
    phase = list(itertools.repeat("ms", nstars))

    for i in range(nstars):
        Teffs[i] = t[i][0]
        loggs[i] = t[i][1]
        masses[i] = t[i][2]
        lumis[i] = 10**t[i][3]
        phase[i] = t[i][4]

    radii = np.sqrt(lumis * Lsun / (4 * np.pi * sb * Teffs**4))  # cm
    radii = radii * (10**(-2))  # m
    area = 4 * np.pi * radii**2  # m2

    log.write('\nReading stellar parameters from:  ' + parsfile + '\n')
    log.write(
        '----------------------------------------------------------------\n\n')

    #---------------------------------
    # MASS BINS
    #---------------------------------
    logL = np.log10(lumis)
    logL_breaks = np.zeros(len(logL))
    nn = len(lumis)

    lum_breaks = np.zeros(nn + 1)

    for i in range(nn + 1):
        if i == 0:
            lum_breaks[i] = lumis[0]
        if i > 0 and i < nn:
            lum_breaks[i] = np.mean([lumis[i - 1], lumis[i]])
        if i == nn:
            lum_breaks[i] = lumis[nn - 1]

    isofile = stpars.gettracks(feh, afe, age, iso=iso)
    F0 = 1.021e-20  # 1.1596e-10 #Vega flux of Hband in erg cm-2 s-1 Hz-1
    t = np.loadtxt(isofile)
    if iso.upper() == 'DARTMOUTH':
        isoteff = 10**t[:, 2]
        isologg = t[:, 3]
        isomass = t[:, 1]
        isologL = t[:, 4]
    if iso.upper() == 'PADOVA':
        isoteff = 10**t[:, 6]
        isologg = t[:, 7]
        isomass = t[:, 2]
        isologL = t[:, 5]
        isoHFlux = 10**(-t[:, 30] / 2.5) * F0
        isologLH = np.log10(isoHFlux * 4 * np.pi * (10 * pc)**2)
    print(iso)

    isoL = 10**isologLH
    mass_breaks = interp(isologLH, np.log10(lum_breaks), isomass)
    mass_breaks[0] = masses[0]
    mass_breaks[-1] = masses[-1]

    # Correct mass_breaks if interpolation fails for stars close to the turn-off
    for i in range(nn):
        if mass_breaks[i] > mass_breaks[i + 1] or mass_breaks[i] > masses[i]:
            mass_breaks[i] = masses[i] - (masses[i] - masses[i - 1]) / 2
            logL_breaks[i] = logL[i] - (logL[i] - logL[i - 1]) / 2

    #---------------------------------
    # SSP SPECTRA
    #---------------------------------
    fraction_M = np.zeros(len(masses))
    fraction_L = np.zeros(len(masses))
    sum_flux = np.zeros(len(masses))
    L_corr = np.zeros(len(masses))
    #inter = interp1d(isomass, isoHmag, fill_value = "extrapolate")

    log.write('SSP divided in:  ' + str(nstars) + ' mass bins\n')
    log.write('Information about each bin: \n')
    log.write('----------------------\n')
    data = '%5s%10s%10s%10s%10s%7s%7s%10s%10s%10s%10s%10s%10s%10s' % (
        'i', 'logL_i', 'logL_f', 'Mass_i', 'Mass_f', 'Teff', 'logg', 'Mass',
        'logLstar', 'Radius', 'logLbin', 'Lcorr', 'frac_M', 'frac_L')
    log.write(data + '\n')
    log.write(
        '    -----------------------------------------------------------------------------------------------------------------------------\n'
    )

    for i in range(nstars):
        #if phase[i] != 'rgb_cn':
        #    file_flux = pfant12.set_stspec_filename(feh, afe, lmin, lmax, Teffs[i],
        #                               loggs[i], CFe, NFe, OFe, MgFe, SiFe,
        #                                CaFe, TiFe, NaFe, AlFe, BaFe, EuFe)
        #else:
        #    file_flux = pfant12.set_stspec_filename(feh, afe, lmin, lmax, Teffs[i],
        #                               loggs[i], CFe, NFe, OFe, MgFe, SiFe,
        #                                CaFe, TiFe, NaFe, AlFe, BaFe, EuFe)

        file_flux = ret.set_spectra_name(Teffs[i], loggs[i], Z)
        print(
            '-------------------------------------------------------------------------------'
        )
        print('Implementing file: ' + file_flux)
        #---------------------------------
        # CONVOLUTION STELLAR SPECTRA
        #---------------------------------

        os.system('cp ./Stellar_Spectra/' + file_flux + ' ./')
        os.system('mv ./' + file_flux + ' st_spectra')

        #os.system('rm -f st_spectra')
        #print(file_flux)
        #os.system('cp ' + file_flux + ' temp.in')
        #nulbad_call = "nulbad --fn_flux temp.in --fn_cv st_spectra --flam T" \
        #    + " --pat %f --fn_progress progress.txt --fwhm %f" % (dl, fwhm)

        #os.system(nulbad_call)

        #---------------------------------
        # READ CONVOLVED STELLAR SPECTRA
        #---------------------------------
        tt = np.loadtxt('st_spectra')
        lamda = tt[:, 0]
        hfilt = pyphot.get_library()['GROUND_BESSELL_H']
        units = 1
        #units = 1e-8 * 4 * np.pi  # erg / (s cm2 cm ster) --> erg / (s cm2 A)
        # Scale flux according to the star surface area and normalize by Lsun
        st_flux = tt[:, 1] * (
            lumis[i]
        )  #*hfilt.get_flux(tt[:,0],tt[:,1])# * area[i] / Lsun # erg / (s cm2 A) --> Lsun / A

        if i == 0:
            ssp_flux = np.zeros(len(st_flux))

        # Number of stars formed between mass_breaks[i] and mass_breaks[i+1] (per unit of mass):
        #  sum(phi_m * dm) = phi_mdm
        dm_break = mass_breaks[i + 1] - mass_breaks[i]
        #        for k in range(len(mass_breaks)):
        #            print(str(mass_breaks[k]))
        dm = dm_break / 1000
        if dm > 0.01:
            dm = 0.001

#        ndm = round((mass_breaks[i+1] - mass_breaks[i]) / dm)
        masses_temp = np.arange(mass_breaks[i], mass_breaks[i + 1], dm)

        iso_L_temp = 10**interp(isomass, masses_temp, isologLH)

        #        print(str(iso_L_temp))
        phi_mdm = 0
        mass_bin = 0
        L_bin = 0
        L_star_bin = 0

        #---------------------------------
        # IMF: KROUPA
        #---------------------------------
        if imf == 'kroupa':
            for j in range(len(masses_temp)):
                cc = 1
                if masses_temp[j] >= 1.0 and masses_temp[j] < 100: x = 2.7
                if masses_temp[j] >= 0.5 and masses_temp[j] < 1:
                    x = 2.3
                    cc = ((1.0**(-2.7)) / 1.0**(-2.3))
                if masses_temp[j] >= 0.08 and masses_temp[j] < 0.5:
                    x = 1.3
                    cc = ((0.5**(-2.3)) / 0.5**(-1.3)) * \
                        ((1.0**(-2.7)) / 1.0**(-2.3))
                if masses_temp[j] >= 0.01 and masses_temp[j] < 0.08:
                    x = 0.3
                    cc = (0.08**(-1.3)) / 0.08**(-0.3) * (
                        (0.5**(-2.3)) / 0.5**(-1.3)) * (
                            (1.0**(-2.7)) / 1.0**(-2.3))
                #Hmag = inter(masses_temp[j])
                phi_m = masses_temp[j]**(-x) * cc
                phi_mdm = phi_mdm + phi_m * dm
                mass_bin = mass_bin + masses_temp[j] * phi_m * dm
                L_bin = L_bin + iso_L_temp[j] * phi_m * dm  # * Hmag
                L_star_bin = L_star_bin + lumis[i] * phi_m * dm  #* Hmag

        #---------------------------------
        # IMF: SALPETER
        #---------------------------------
        if imf == 'salpeter':
            x = 2.3
            for j in range(len(masses_temp)):
                phi_m = masses_temp[j]**(-x)
                phi_mdm += phi_m * dm
                mass_bin += masses_temp[j] * phi_m * dm
                L_bin += iso_L_temp[j] * phi_m * dm
                L_star_bin += lumis[i] * phi_m * dm

        #---------------------------------
        # IMF: UNIMODAL (SLOPE=x)
        #---------------------------------
        if imf == 'unimodal':
            x = slope
            for j in range(len(masses_temp)):
                phi_m = masses_temp[j]**(-x)
                phi_mdm += phi_m * dm
                mass_bin += masses_temp[j] * phi_m * dm
                L_bin += iso_L_temp[j] * phi_m * dm
                L_star_bin += lumis[i] * phi_m * dm

        #---------------------------------
        # SUM(S * PHI * DM)
        #---------------------------------
        L_corr[i] = L_bin / L_star_bin
        ssp_flux += st_flux * L_corr[i] * phi_mdm
        fraction_M[i] = mass_bin
        #sum_flux[i] = np.sum(st_flux) * (max(lamda) - min(lamda))
        fraction_L[i] = L_bin

    # normalization: sum(mass * phi_m * dm) = 1 Msun
    L_SSP = np.sum(fraction_L)
    fraction_L = fraction_L / L_SSP
    ssp_flux = ssp_flux / np.sum(fraction_M)
    ssp_flux = ssp_flux  # * hfilt.get_flux(lamda,ssp_flux) # Hband unnormalisation and erg conversion
    fraction_M = fraction_M / np.sum(fraction_M)

    #---------------------------------
    # WRITE INFO IN LOGFILE
    #---------------------------------
    for i in range(nstars):
        data = '%5i%10.4f%10.4f%10.4f%10.4f%7.0f%7.2f%10.4f%10.4f%10.4f%10.4f%10.4f%10.4f%10.2f' % (i+1, np.log10(lum_breaks[i]),\
                np.log10(lum_breaks[i+1]), mass_breaks[i], mass_breaks[i+1],\
                Teffs[i], loggs[i], masses[i], np.log10(lumis[i]),\
                radii[i] / Rsun, np.log10(L_SSP * fraction_L[i]), L_corr[i],\
                fraction_M[i]*100, fraction_L[i]*100)
        log.write(data + '\n')

    log.write('----------------------\n')

    data = [
        'logL_i, logL_f   -> lower and upper luminosity limits of the' +
        ' bin (log[L/Lsun])',
        'Mass_i, Mass_f   -> lower and upper mass limits of the bin (Msun)',
        'Teff, logg       -> stellar parameters of the synthetic stellar ' +
        'spectrum representing the stars within bin',
        'Mass, logLstar   -> mass and luminosity of the star with' +
        ' Teff, logg',
        'Radius           -> radius of a star with Teff, logg (Rsun,' +
        ' Radius = sqrt[L / (4 * pi * sb * Teffs**4)])',
        'logLbin          -> total luminosity of the bin (log[L/Lsun])',
        'Lcorr            -> luminosity correction factor taking into ' +
        'account the variation of stellar luminosities',
        '                      within bin ( = sum[L(m)*phi_m*dm]' +
        ' / sum[Lstar*phi_m*dm])',
        'frac_M           -> fraction of mass in bin i (%, frac_M = ' +
        '(Mbin/M_SSP)*100)',
        'frac_L           -> fraction of light in bin i (%, frac_L = ' +
        '(Lbin/L_SSP)*100)'
    ]
    for i in range(len(data)):
        log.write(data[i] + '\n')

    datum = open(file_ssp, 'w+')
    data = '%7.2f%13.5e' % (lamda[0], ssp_flux[0])
    datum.write(data + '\n')

    for i in range(1, len(lamda)):
        data = '%7.2f%13.5e' % (lamda[i], ssp_flux[i])
        datum.write(data + '\n')

    os.system('cp SSP_Spectra ' + file_ssp)

    log.write('\n    SSP spectra -->  ' + file_ssp)
    log.write(
        '----------------------------------------------------------------\n')
    log.close()
    datum.close()

    print(
        '-------------------------------------------------------------------------------'
    )
    print('SSP successfully created and saved in --> ' + file_ssp)

    #---------------------------------
    # PLOT SSP SPECTRA
    #---------------------------------

    t = np.loadtxt(file_ssp)
    t[:, 1] = t[:, 1] / hfilt.get_flux(t[:, 0], t[:, 1])  #Lsun
    #t[:,1] = 3e-9*t[:,1]/(t[:,0]*10**-4)**2
    #    t1 = np.zeros(len(t))
    #    t2 = np.zeros(len(t))
    #    for i in range(len(t)):
    #        t1[i] = t[i][0]
    #        t2[i] = t[i][1]
    plt.figure()
    font = 16
    #    plt.yscale('log')
    plt.xlabel(r'$\lambda (\AA)$', fontsize=font)
    plt.ylabel(r'$F/F_{12230}$', fontsize=font)
    #plt.savefig(file_fig)

    t_2 = np.loadtxt(
        './DATA/MarS/MARv_SAL_sed_NOCS_H_Z_0.029999999_Tg_1.0000000e+10')
    t_3 = np.loadtxt(
        './DATA/GirS/GIRv_SAL_sed_NOCS_H_Z_0.029999999_Tg_1.0000000e+10')
    t_4 = np.loadtxt(
        './DATA/BaSS/BASv_SAL_sed_NOCS_H_Z_0.029999999_Tg_1.0000000e+10')
    t_2[:, 0] = t_2[:, 0] * 10000
    t_3[:, 0] = t_3[:, 0] * 10000
    t_4[:, 0] = t_4[:, 0] * 10000

    ax = plt.subplot(111)

    lwid = 0.3

    inter = interp1d(t_2[:, 0], t_2[:, 1])
    t_2norm = inter(12230)
    t_2[(18050 - 9350) + 150:(18800 - 9350) + 150, 1] = np.nan
    ax.plot(t_2[:, 0],
            t_2[:, 1] / inter(12230),
            'r',
            linewidth=lwid,
            label='MarS Model')
    er1 = t_2[:, 1] / inter(12230)

    inter = interp1d(t_3[:, 0], t_3[:, 1])
    t_3[(18050 - 9350) + 150:(18800 - 9350) + 150, 1] = np.nan
    ax.plot(t_3[:, 0],
            t_3[:, 1] / inter(12230),
            'g',
            linewidth=lwid,
            label='GirS Model')
    er2 = t_3[:, 1] / inter(12230)

    inter = interp1d(t_4[:, 0], t_4[:, 1])
    t_4[(18050 - 9350) + 150:(18800 - 9350) + 150, 1] = np.nan
    ax.plot(t_4[:, 0],
            t_4[:, 1] / inter(12230),
            'c',
            linewidth=lwid,
            label='BaSS Model')
    er3 = t_4[:, 1] / inter(12230)

    inter = interp1d(t[:, 0], t[:, 1])
    t[(18050 - 9350) + 150:(18800 - 9350) + 150, 1] = np.nan
    # Removal of telluric lines from plots and calculations
    ax.plot(t[:, 0],
            t[:, 1] / inter(12230),
            'b',
            linewidth=lwid,
            label='Our Model')

    #####################################
    ### Other plots (error analysis) ####
    #####################################

    #avg = (er1 + er2 + er3)/3
    #  # Average of all other models
    #avg_er = abs(t[:,1]/inter(12230) - avg)
    #  # Difference between our model and the average
    #ax.fill_between(t[:,0], t[:,1]/inter(12230) + avg_er, t[:,1]/inter(12230) - avg_er,
    #               facecolor = 'c', alpha = 0.5, label = 'Average comparative error (BaSS/GirS/MarS)')
    # # Filled-colour relative errors (assumes the same error in both vertical directions and no error in horizontal directions

    #max_err = np.amax([np.subtract(er1, t[:,1]/inter(12230)),
    #                   np.subtract(er2, t[:,1]/inter(12230)),
    #                   np.subtract(er3, t[:,1]/inter(12230))], axis = 0)
    #max_err[max_err < 0] = 0
    #  #Use with abs to obtain the absolute differences in the thin differences plot. Otherwise, remove abs to obtain the filled in differences plots.

    #min_err = np.amin([np.subtract(er1, t[:,1]/inter(12230)),
    #                   np.subtract(er2, t[:,1]/inter(12230)),
    #                   np.subtract(er3, t[:,1]/inter(12230))], axis = 0)
    #min_err[min_err > 0] = 0
    #  # Finding the upper difference limit for our data compared to other models (switch the < and > to the opposite operator to find the lower difference limits).

    #ax.fill_between(t[:,0], t[:,1]/inter(12230) + max_err, t[:,1]/inter(12230),
    #                facecolor = 'c', alpha = 0.35, label = 'Maximum difference (BaSS/GirS/MarS)')
    #ax.fill_between(t[:,0], t[:,1]/inter(12230), t[:,1]/inter(12230) + min_err,
    #                facecolor = 'c', alpha = 0.35)

    #max_err = np.amax([np.subtract(er1, t[:,1]/inter(12230)),
    #                   np.subtract(er2, t[:,1]/inter(12230)),
    #                   np.subtract(er3, t[:,1]/inter(12230))], axis = 0)
    #max_err[max_err > 0] = 0

    #min_err = np.amin([np.subtract(er1, t[:,1]/inter(12230)),
    #                   np.subtract(er2, t[:,1]/inter(12230)),
    #                   np.subtract(er3, t[:,1]/inter(12230))], axis = 0)
    #min_err[min_err < 0] = 0
    #  # Finding the upper difference limit for our data compared to other models (switch the < and > to the opposite operator to find the lower difference limits).

    #ax.fill_between(t[:,0], t[:,1]/inter(12230) + max_err, t[:,1]/inter(12230),
    #               facecolor = 'r', alpha = 0.35, label = 'Minimum difference (BaSS/GirS/MarS)')
    #ax.fill_between(t[:,0], t[:,1]/inter(12230), t[:,1]/inter(12230) + min_err,
    #               facecolor = 'r', alpha = 0.35)

    #ax2 =  plt.subplot(111)
    #max_err[(18050-9350)+150:(18800-9350)+150] = np.nan
    #    # Position of telluric gap
    #gauss = ndimage.gaussian_filter(max_err[max_err!=np.nan], sigma = 1)
    #    # Applies a basic Gaussian filter of sigma=1 to the errors, to liken them to our dataset from Meneses-Goyita 2015.
    #    # Also attempts to ignore the telluric line region
    #ax2.plot(t[:,0][max_err!=np.nan], gauss, 'k', linewidth = lwid)
    #plt.xlabel(r'$\lambda (\AA)$', fontsize = font)
    #plt.ylabel(r'$\Delta F/F_{12230}$', fontsize = font)
    # 	# A basic line plot for just errors, to be fitted below the standard plots

    ax.legend(fontsize=font)
    ax.tick_params(axis='both', labelsize=font)
    print()
    plt.show(block=True)
Example #18
import seaborn as sns
sns.set_style('ticks')

# Imports
import numpy as np
import pandas as pd
from astropy.io import fits
from scipy import interpolate
from astropy.convolution import Gaussian1DKernel, Gaussian2DKernel, convolve
from scipy.signal import medfilt
import astropy.units as u
from astropy.time import Time
from util import *

import pyphot
lib = pyphot.get_library()

import matplotlib as mpl
from matplotlib.ticker import FormatStrFormatter
params = {
    'axes.labelsize': 12,
    'font.size': 12,
    'legend.fontsize': 12,
    'xtick.labelsize': 12,
    'ytick.labelsize': 12,
    'text.usetex': True,
    'figure.figsize': [8, 8 / 1.61]
}
mpl.rcParams.update(params)

Example #19
import numpy as np
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
import pyphot

"""
Convolve the RT-modelled SED with the filter profiles of the observed SED, and produce model-SED files for each model scenario along with the SED image.

"""


Source_Distance = 268 * pc
OutPutFiles = ["Model_FourShells_EvenMass.rtout", "Model_FourShells_UnEvenMass.rtout", "Model_InnerShell_Only.rtout", "Model_OuterShell_Only_K2010.rtout", "Model_OuterShell_Only_M2010.rtout", "Model_ShellThree_Only.rtout", "Model_ShellTwo_Only.rtout"] #RT output files

#Getting Instrument Filters for Filter Convolution of the SED
Filter_Library = pyphot.get_library(fname="Ampere_FiterProfile_Library.hdf5")

#Filter names of the photometric points in my SED as given in the Ampere filter profiles library.
Filter_Names = np.array(['GAIADR2_Gbp', 'GAIADR2_G', 'GAIADR2_Grp', '2MASS_J', '2MASS_H', '2MASS_Ks', 'COBE_DIRBE_1.25', 'COBE_DIRBE_2.2', 'COBE_DIRBE_3.5', 'COBE_DIRBE_4.9', 'WISE_RSR_W3', 'WISE_RSR_W4', 'AKARI_S9W', 'AKARI_L18W', 'AKARI_FIS_N60', 'AKARI_FIS_WIDES', 'AKARI_FIS_WIDEL', 'IRAS_12', 'IRAS_25', 'IRAS_60', 'IRAS_100', 'HERSCHEL_PACS_BLUE', 'HERSCHEL_PACS_RED', 'HERSCHEL_SPIRE_PSW', 'HERSCHEL_SPIRE_PMW', 'HERSCHEL_SPIRE_PLW', 'JCMT_SCUBA2_450', 'JCMT_SCUBA2_850'])


for filename in OutPutFiles:

	model = ModelOutput(filename)
	sed = model.get_sed(group=0, inclination='all', aperture=-1, distance=Source_Distance, units='Jy')

	#print(sed.wav)
	#print(sed.val)
	
	RT_SED_wavelengths = sed.wav  # Wavelength grid set by the limits (100, 0.3, 1200.) given when the SED was created in the RT modelling stage, before convolution with the filters
	RT_SED_Fluxes = sed.val[0]  # SED fluxes from RT modelling, before convolution with the filters. [0]: Hyperion returns a list of arrays rather than a single array, so take the zeroth element even if the rest are empty.
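	# A hedged sketch of the filter-convolution step using the loaded library, reusing
	# the load_filters/get_flux pattern from the other examples. The unit handling is an
	# assumption: sed.wav is in micron and the fluxes were requested in Jy, so a real
	# implementation would convert to the per-wavelength flux units pyphot expects
	# before calling get_flux.
	filters = Filter_Library.load_filters(Filter_Names, interp=True,
	                                      lamb=RT_SED_wavelengths * pyphot.unit['micron'])
	for band, f in zip(Filter_Names, filters):
		band_flux = f.get_flux(RT_SED_wavelengths, RT_SED_Fluxes)
		print(band, band_flux)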