Example #1
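
All of the examples below share the same calling convention (inFilePaths and outFilePaths are lists of path strings) and the same set of imports. A minimal import sketch follows; the stored-table helper (stab) and broadband helper (bnd) are project-specific modules whose import paths are not shown in these snippets, so only their assumed roles are noted in comments.

# imports assumed by the examples below (standard libraries only)
import glob
import gzip
import numpy as np
import scipy.io
from astropy.io import fits
import astropy.units as u
# in addition, a stored-table helper module is assumed to be imported as 'stab'
# (providing writeStoredTable and TokenizedFile) and a broadband helper as 'bnd'
# (providing builtinBandNames and BroadBand); their import paths are not shown here
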
def convertMeanPascucciBenchmarkOpticalProps(inFilePaths, outFilePaths):
    # read the input file
    w, ss, se = np.loadtxt(inFilePaths[0],
                           usecols=(0, 1, 2),
                           comments=';',
                           unpack=True)

    # determine absorption cross section from scattering cross section and total cross section
    sa = se - ss

    # convert wavelengths from micron to m, and scale the cross sections from arbitrary units to a reasonable range
    w *= 1e-6
    sa *= 7e-13
    ss *= 7e-13

    # setup isotropic scattering
    g = np.zeros_like(w)

    # determine dust mass so that kappa values have reasonable order of magnitude
    mu = np.zeros_like(w) + 1.5e-29  # arbitrary value, in kg/H

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['lambda'], ['m'], ['log'], [w],
                          ['sigmaabs', 'sigmasca', 'g', 'mu'],
                          ['m2/H', 'm2/H', '1', 'kg/H'],
                          ['log', 'log', 'lin', 'lin'], [sa, ss, g, mu])
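
A hypothetical invocation of the function above, to illustrate the calling convention only; the file names are made up, and the actual call is commented out because the input file is not provided here.

# hypothetical paths, for illustration of the calling convention only
inFilePaths = ["PascucciBenchmark.dat"]                     # column text file: wavelength, sigma_sca, sigma_ext
outFilePaths = ["MeanPascucciBenchmarkOpticalProps.stab"]   # stored table to be written
# convertMeanPascucciBenchmarkOpticalProps(inFilePaths, outFilePaths)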
Example #2
def convertMinOpticalProps(inFilePaths, outFilePaths):
    # discover the input files and parse the bulk density
    infiles = sorted(glob.glob(inFilePaths[0]))
    rhobulk = float(inFilePaths[1].split("/")[-1])

    # get the grain size grid from the filenames and convert to m
    a = np.array([
        float(infile.split("/")[-1].split("_")[-1][0:-8]) for infile in infiles
    ]) * 1e-6

    # get the wavelength grid from the first file and convert to m
    w = np.loadtxt(infiles[0], usecols=(0, ), unpack=True) * 1e-6

    # allocate arrays
    Qabs = np.zeros((len(w), len(a)))
    Qsca = np.zeros((len(w), len(a)))
    g = np.zeros((len(w), len(a)))

    # read all data into memory
    for i in range(len(a)):
        Qabs[:, i], Qsca[:, i], g[:, i] = np.loadtxt(infiles[i],
                                                     usecols=(2, 3, 4),
                                                     unpack=True)

    # convert the kappa values from cm^2/g to m^2/kg, and then to Q values (note that a must be in m)
    Qabs *= 0.1 * (4. / 3. * a * rhobulk)
    Qsca *= 0.1 * (4. / 3. * a * rhobulk)

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['a', 'lambda'], ['m', 'm'],
                          ['log', 'log'], [a, w], ['Qabs', 'Qsca', 'g'],
                          ['1', '1', '1'], ['log', 'log', 'lin'],
                          [Qabs.T, Qsca.T, g.T])
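
The grain sizes and bulk density above are parsed from the path strings themselves. A standalone sketch of that parsing, with hypothetical names chosen so that the last 8 characters of the file name form the suffix and the bulk density is the last segment of the second "path" (in kg/m3, consistent with the unit conversion in the function):

# hypothetical path strings, decoded with the same logic as above
size = float("Min/grain_0.10micr.dat".split("/")[-1].split("_")[-1][0:-8]) * 1e-6   # -> 1e-07 m
rho = float("bulkdensity/3000.".split("/")[-1])                                      # -> 3000.0 kg/m3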
Example #3
def convertMeanPinteBenchmarkMuellerMatrix(inFilePaths, outFilePaths):
    # set the wavelength grid to include wavelengths at the extreme ends of the wavelength range in opacity.dat
    w = np.array((0.1, 1., 3000.))

    # read the input file
    theta, S11, S12, S33, S34 = np.loadtxt(inFilePaths[0],
                                           usecols=(0, 1, 2, 3, 4),
                                           unpack=True)

    # convert units from micron to m and from degrees to radians
    # (Sxx values are always used relative to S11 so we don't need to worry about units)
    w *= 1e-6
    theta *= np.pi / 180.

    # convert input values from Legacy convention to IAU convention
    S34 = -S34

    # copy the Mueller matrix coefficients for each wavelength
    S11 = np.tile(S11, (len(w), 1))
    S12 = np.tile(S12, (len(w), 1))
    S33 = np.tile(S33, (len(w), 1))
    S34 = np.tile(S34, (len(w), 1))

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['lambda', 'theta'], ['m', 'rad'],
                          ['log', 'lin'], [w, theta],
                          ['S11', 'S12', 'S33', 'S34'], ['1', '1', '1', '1'],
                          ['lin', 'lin', 'lin', 'lin'], [S11, S12, S33, S34])
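
The np.tile calls above replicate the single tabulated angle profile once per wavelength, so that each coefficient array is indexed as (wavelength, theta), matching the axis order of the write call. A toy sketch of that replication:

# toy illustration of the tiling used above
profile = np.array([1., 2., 3.])    # one value per scattering angle
tiled = np.tile(profile, (2, 1))    # shape (2, 3): the same profile repeated for each of 2 wavelengths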
Example #4
def convertMarastonSEDFamily(inFilePaths, outFilePaths):
    for inFilePath, outFilePath in zip(inFilePaths, outFilePaths):

        # read and concatenate all column text files for this model
        # (all info is in the files; the file names themselves are thus redundant)
        tv, Zv, wv, Lv = np.concatenate([ np.loadtxt(path) for path in glob.glob(inFilePath) ]).T

        # determine the grid points for each axis as the sorted list of unique values
        # also get the indices in the unique array that can be used to reconstruct the original vector
        w, wi = np.unique(wv, return_inverse=True)
        Z, Zi = np.unique(Zv, return_inverse=True)
        t, ti = np.unique(tv, return_inverse=True)

        # allocate hypercube for the luminosities and copy the input luminosities to it
        # values that are not provided in the input remain at zero
        L = np.zeros((len(w),len(Z),len(t)))
        L[wi,Zi,ti] = Lv

        # convert from input units: Age(Gyr)  [Z/H]  lambda(AA)  L_{lambda}(erg/s/AA)
        w *= 1e-10
        Z = 0.02 * 10**Z
        t *= 1e9
        L *= 1e3

        # write stored table
        stab.writeStoredTable(outFilePath,
                              ['lambda','Z','t'], ['m','1','yr'], ['log','log','log'], [w,Z,t],
                              ['Llambda'], ['W/m'], ['log'], [L])
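
The hypercube fill above relies on np.unique with return_inverse=True: the inverse indices give the grid position of every input row along each axis, so a single fancy-indexing assignment scatters all luminosities into place (combinations absent from the input stay zero). A toy sketch of the same technique with made-up numbers:

# toy illustration of the scatter assignment used above
xv = np.array([1., 1., 2., 2.])       # first-axis value for each input row
yv = np.array([10., 20., 10., 20.])   # second-axis value for each input row
vv = np.array([0.1, 0.2, 0.3, 0.4])   # data value for each input row
x, xi = np.unique(xv, return_inverse=True)   # x = [1, 2],   xi = [0, 0, 1, 1]
y, yi = np.unique(yv, return_inverse=True)   # y = [10, 20], yi = [0, 1, 0, 1]
V = np.zeros((len(x), len(y)))
V[xi, yi] = vv                               # V == [[0.1, 0.2], [0.3, 0.4]]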
Example #5
def convertMeanTrustBenchmarkMuellerMatrix(inFilePaths, outFilePaths):

    # get a list of the input file names, in order of increasing theta
    paths = sorted(glob.glob(inFilePaths[0]))

    # determine the scattering angle grid from the file names
    theta = np.array([float(path.split('_')[-1][0:3]) for path in paths])

    # determine the wavelength grid from the first file
    w = np.loadtxt(paths[0], usecols=(0, ))

    # allocate arrays for the Mueller matrix coefficients
    S11 = np.zeros((len(w), len(theta)))
    S12 = np.zeros((len(w), len(theta)))
    S33 = np.zeros((len(w), len(theta)))
    S34 = np.zeros((len(w), len(theta)))

    # read the Mueller matrix coefficients from each file
    for index in range(len(paths)):
        S11[:, index], S12[:, index], S33[:, index], S34[:, index] = \
            np.loadtxt(paths[index], usecols=(1, 2, 3, 4), unpack=True)

    # convert units from micron to m and from degrees to radians
    # (Sxx values are always used relative to S11 so we don't need to worry about units)
    w *= 1e-6
    theta *= np.pi / 180.

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['lambda', 'theta'], ['m', 'rad'],
                          ['log', 'lin'], [w, theta],
                          ['S11', 'S12', 'S33', 'S34'], ['1', '1', '1', '1'],
                          ['lin', 'lin', 'lin', 'lin'], [S11, S12, S33, S34])
Example #6
def convertDustemOpticalProps(inFilePaths, outFilePaths):

    # ------------ read wavelengths file ------------

    infile = stab.TokenizedFile(open(inFilePaths[0]))

    # skip header lines, read the wavelength grid size, and read the wavelengths
    infile.skipHeaderLines()
    Nlambda = int(infile.next())
    w = np.array([float(infile.next()) for k in range(Nlambda)])

    # ------------ read efficiencies file ------------

    infile = stab.TokenizedFile(open(inFilePaths[1]))

    # first block: skip header lines, read the grain size grid size, and read the grain sizes
    infile.skipHeaderLines()
    Na = int(infile.next())
    a = np.array([float(infile.next()) for i in range(Na)])

    # second block: skip header lines, and read the absorption efficiencies
    infile.skipHeaderLines()
    Qabs = np.array([[float(infile.next()) for i in range(Na)]
                     for k in range(Nlambda)]).T

    # third block: skip header lines, and read the scattering efficiencies
    infile.skipHeaderLines()
    Qsca = np.array([[float(infile.next()) for i in range(Na)]
                     for k in range(Nlambda)]).T

    # ------------ read scattering asymmetry file ------------

    infile = stab.TokenizedFile(open(inFilePaths[2]))

    # first block: skip header lines, read the grain size grid size, and skip the grain sizes
    infile.skipHeaderLines()
    if Na != int(infile.next()):
        raise ValueError(
            "Efficiencies and scattering asymmetry files have different number of grain sizes"
        )
    for i in range(Na):
        infile.next()

    # second block: skip header lines, and read the scattering asymmetry parameters
    infile.skipHeaderLines()
    g = np.array([[float(infile.next()) for i in range(Na)]
                  for k in range(Nlambda)]).T

    # ------------ write stored table ------------

    stab.writeStoredTable(outFilePaths[0], ['a', 'lambda'], ['m', 'm'],
                          ['log', 'log'], [a * 1e-6, w * 1e-6],
                          ['Qabs', 'Qsca', 'g'], ['1', '1', '1'],
                          ['log', 'log', 'lin'], [Qabs, Qsca, g])
Example #7
def convertCastelliKuruczSEDFamily(inFilePaths, outFilePaths):
    # return metallicity value for a given file path
    def toM(path):
        name = path.split('/')[-1]
        return (-0.1 if name[2]=='m' else 0.1)*(float(name[3:5]))
    # return temperature value for a given file path
    def toT(path):
        return float(path.split('/')[-1].split('_')[1].split('.')[0])
    # return logg value for a given column name
    def toG(name):
        return 0.1*float(name[1:])

    # get a list of all the file paths and file names
    paths = sorted(glob.glob(inFilePaths[0]))

    # get the metallicity grid and the temperature grid from the filenames
    M = np.unique([ toM(path) for path in paths ])
    T = np.unique([ toT(path) for path in paths ])

    # get the wavelength grid and the gravity grid from the first file
    data = fits.open(paths[0])[1].data  # the tables are in the first extension
    w = data['WAVELENGTH']
    g = np.unique([ toG(data.columns[i].name) for i in range(1, len(data.columns)) ])

    # allocate the flux array
    F = np.zeros((len(w), len(M), len(T), len(g)))     # indices k, m, t, n

    # read the data from each file
    for path in paths:
        # get the metallicity index and the temperature index from the filename and the grid
        m = M.tolist().index(toM(path))
        t = T.tolist().index(toT(path))

        # loop over each column in the file (skipping the first wavelength column)
        data = fits.open(path)[1].data
        for i in range(1, len(data.columns)):
            # get the gravity index from the column name and the grid
            name = data.columns[i].name
            n = g.tolist().index(toG(name))
            # get the fluxes
            F[:,m,t,n] = data[name]

    # convert units
    w *= 1e-10          # from Angstrom to m
    Z = 0.02 * 10**M    # from [M/H] to fraction
    g = 10**(g-2)       # from log_g (in cm/s2) to g (in m/s2)
    F *= 1e7            # from erg/s/cm2/A to W/m2/m

    # write stored table
    stab.writeStoredTable(outFilePaths[0],
                          ['lambda','Z','Teff','g'], ['m','1','K','m/s2'], ['log','log','log','log'], [w,Z,T,g],
                          ['Flambda'], ['W/m2/m'], ['log'], [F])
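
For reference, a standalone sketch of how the nested parsing helpers above decode file and column names; the names below are made up to match the parsing logic and are not taken from the actual input data:

# hypothetical names, decoded with the same logic as toM, toT, and toG above
name = "ckm05_3500.fits"                                      # last segment of a hypothetical file path
M_H = (-0.1 if name[2] == 'm' else 0.1) * float(name[3:5])    # -> -0.5   (metallicity [M/H])
Teff = float(name.split('_')[1].split('.')[0])                # -> 3500.0 (effective temperature)
logg = 0.1 * float("g45"[1:])                                 # -> 4.5    (log g from a column name)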
Example #8
def convertGenericOpticalProps(inFilePaths, outFilePaths):
    # convert options to Boolean
    reverse, skip1, skip2, skip3 = [
        b.lower().endswith("/true") for b in inFilePaths[1:5]
    ]

    # open the input file and skip the header
    infile = stab.TokenizedFile(open(inFilePaths[0]))
    infile.skipHeaderLines()

    # read the grid size
    Na = int(infile.next())
    infile.skipToEndOfLine()
    Nlambda = int(infile.next())
    infile.skipToEndOfLine()

    # allocate arrays
    w = np.zeros(Nlambda)
    a = np.zeros(Na)
    Qabs = np.zeros((Na, Nlambda))
    Qsca = np.zeros((Na, Nlambda))
    g = np.zeros((Na, Nlambda))

    # read the data blocks
    for i in range(Na):
        a[i] = float(infile.next())
        infile.skipToEndOfLine()

        for k in range(Nlambda):
            if skip1: infile.next()
            w[k] = float(infile.next())
            if skip2: infile.next()
            Qabs[i, k] = float(infile.next())
            Qsca[i, k] = float(infile.next())
            if skip3: infile.next()
            g[i, k] = float(infile.next())
            infile.skipToEndOfLine()

    # reverse the wavelengths if needed
    if reverse:
        w = np.flip(w, 0)
        Qabs = np.flip(Qabs, 1)
        Qsca = np.flip(Qsca, 1)
        g = np.flip(g, 1)

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['a', 'lambda'], ['m', 'm'],
                          ['log', 'log'], [a * 1e-6, w * 1e-6],
                          ['Qabs', 'Qsca', 'g'], ['1', '1', '1'],
                          ['log', 'log', 'lin'], [Qabs, Qsca, g])
Example #9
def convertDraineEnthalpies(inFilePaths, outFilePaths):
    # get the arguments
    inFilePath = inFilePaths[0]
    outFilePath = outFilePaths[0]
    bulkdensity = float(inFilePaths[1].split("/")[-1])

    # read the input file
    T, h = np.loadtxt(inFilePath, usecols=(0, 1), unpack=True)

    # convert from J/kg to J/m3
    h *= bulkdensity

    # write stored table
    stab.writeStoredTable(outFilePath, ['T'], ['K'], ['log'], [T], ['h'],
                          ['J/m3'], ['log'], [h])
Example #10
def createQuasarSED(inFilePaths, outFilePaths):
    # wavelength grid in micron
    w = np.array((0.001, 0.01, 0.1, 5., 1000.))
    L = np.zeros_like(w)

    # calculate SED at grid points, with arbitrary first value
    L[0] = 1.
    L[1] = L[0] * (w[1]/w[0])**0.2
    L[2] = L[1] * (w[2]/w[1])**-1.0
    L[3] = L[2] * (w[3]/w[2])**-1.5
    L[4] = L[3] * (w[4]/w[3])**-4.0

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['lambda'], ['m'], ['log'], [w*1e-6],
                                           ['Llambda'], ['W/m'], ['log'], [L])
Example #11
def writeBroadBands(inFilePaths, outFilePaths):

    # loop over all builtin bands
    for name in bnd.builtinBandNames():
        band = bnd.BroadBand(name)

        # construct the output file path based on the band ID
        outFilePath = outFilePaths[0].replace("*",name)

        # get the transmission curve
        w, T = band.transmissionCurve()

        # write stored table
        stab.writeStoredTable(outFilePath, ['lambda'], ['m'], ['lin'], [w.to_value(u.m)],
                                           ['T'], ['1'], ['lin'], [T.to_value(u.m**(-1))])
Example #12
def convertBruzualCharlotSEDFamily(inFilePaths, outFilePaths):
    for inFilePath, outFilePath in zip(inFilePaths, outFilePaths):

        # read the age and wavelength grids from one of the files
        with gzip.open(inFilePath.replace("MM","22"), 'rt') as infile:
            tokens = stab.TokenizedFile(infile)
            Nt = int(tokens.next())
            t = np.array([ float(tokens.next()) for p in range(Nt) ])
            while tokens.next()!="Padova": pass
            for i in range(3): tokens.skipLine()
            Nw = int(tokens.next())
            w = np.array([ float(tokens.next()) for k in range(Nw) ])

        # initialize the metallicity grid and the corresponding filename codes
        Z = np.array((0.0001, 0.0004, 0.004, 0.008, 0.02, 0.05))
        NZ = len(Z)
        Zcode = [ "22", "32", "42", "52", "62", "72" ]

        # allocate hypercube for the luminosities
        L = np.zeros((Nw,NZ,Nt))    # indices k, m, p

        # read the luminosities from each of the files
        for m in range(NZ):
            with gzip.open(inFilePath.replace("MM",Zcode[m]), 'rt') as infile:
                tokens = stab.TokenizedFile(infile)
                # skip the ages and wavelengths
                while tokens.next()!="Padova": pass
                for i in range(3): tokens.skipLine()
                for i in range(Nw+1): tokens.next()
                # iterate over ages
                for p in range(Nt):
                    # read luminosities for each age
                    if int(tokens.next()) != Nw:
                        raise ValueError("Number of luminosities does not match number of wavelengths")
                    for k in range(Nw):
                        L[k, m, p] = float(tokens.next())
                    # skip intervening dummy data
                    for i in range(int(tokens.next())): tokens.next()

        # write stored table, converting from input units
        # wavelengths (Angstrom), metallicities (dimensionless), ages (years), luminosities (Lsun/Angstrom)
        Angstrom = 1e-10
        Lsun = 3.839e26
        t[0] = 1  # avoid zero values in a logarithmically scaled axis
        stab.writeStoredTable(outFilePath,
                              ['lambda','Z','t'], ['m','1','yr'], ['log','log','log'], [w*Angstrom,Z,t],
                              ['Llambda'], ['W/m'], ['log'], [L*(Lsun/Angstrom)])
Example #13
def convertStarburst99SEDFamily(inFilePaths, outFilePaths):
    # open the fits file
    hdul = fits.open(inFilePaths[0])

    # get the axes information
    table = hdul['AXES'].data
    w = table['lambda'][table['lambda']>0]
    Z = table['metallicity'][table['metallicity']>0]
    t = table['time'][table['time']>0]

    # get the luminosities (stored in file as log10)
    L = 10**hdul['SED'].data

    # write stored table
    stab.writeStoredTable(outFilePaths[0],
                          ['lambda','Z','t'], ['m','1','yr'], ['log','log','log'], [w,Z,t],
                          ['Llambda'], ['W/m'], ['log'], [L])
Example #14
def convertMeanZubkoOpticalProps(inFilePaths, outFilePaths):
    # read the input file
    w, se, a, g = np.loadtxt(inFilePaths[0], usecols=(0, 3, 4, 5), unpack=True)

    # convert units from micron to m and from cm^2/H to m^2/H
    w *= 1e-6
    se *= 1e-4

    # calculate absorption and scattering cross section from total cross section and albedo
    sa = (1 - a) * se
    ss = a * se

    # determine dust mass
    mu = np.zeros_like(w) + 1.44e-29  # in kg/H

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['lambda'], ['m'], ['log'], [w],
                          ['sigmaabs', 'sigmasca', 'g', 'mu'],
                          ['m2/H', 'm2/H', '1', 'kg/H'],
                          ['log', 'log', 'lin', 'lin'], [sa, ss, g, mu])
Example #15
def convertMeanDraineLiOpticalProps(inFilePaths, outFilePaths):
    # read the input file
    w, sa, ss, g = np.loadtxt(inFilePaths[0],
                              usecols=(0, 1, 2, 5),
                              unpack=True)

    # convert units from micron to m and from cm^2/H to m^2/H
    w *= 1e-6
    sa *= 1e-4
    ss *= 1e-4

    # determine dust mass per hydrogen nucleon (the dimensionless sum is multiplied by the proton mass in kg)
    mu = np.zeros_like(w) + (5.4e-4 + 5.4e-4 + 1.8e-4 + 2.33e-3 +
                             8.27e-3) * 1.67262178e-27

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['lambda'], ['m'], ['log'], [w],
                          ['sigmaabs', 'sigmasca', 'g', 'mu'],
                          ['m2/H', 'm2/H', '1', 'kg/H'],
                          ['log', 'log', 'lin', 'lin'], [sa, ss, g, mu])
Example #16
def createMeanIvezicBenchmarkOpticalProps(inFilePaths, outFilePaths):

    # wavelength grid in micron and corresponding optical properties
    w = np.array((1e-3, 1., 1e4))
    sa = np.array((1., 1., 1e-4))  # 1/lambda    for lambda>1
    ss = np.array((1., 1., 1e-16))  # 1/lambda^4  for lambda>1
    g = np.array((0., 0., 0.))  # isotropic

    # convert wavelengths from micron to m, and scale the cross sections from arbitrary units to a reasonable range
    w *= 1e-6
    sa *= 2e-26
    ss *= 2e-26

    # determine dust mass so that kappa values have reasonable order of magnitude
    mu = np.zeros_like(w) + 1.5e-29  # arbitrary value, in kg/H

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['lambda'], ['m'], ['log'], [w],
                          ['sigmaabs', 'sigmasca', 'g', 'mu'],
                          ['m2/H', 'm2/H', '1', 'kg/H'],
                          ['log', 'log', 'lin', 'lin'], [sa, ss, g, mu])
Example #17
def convertMeanPinteBenchmarkOpticalProps(inFilePaths, outFilePaths):
    # read the input file
    w, ka, ks = np.loadtxt(inFilePaths[0], usecols=(0, 2, 3), unpack=True)

    # convert units from micron to m and from cm^2/g to m^2/kg
    w *= 1e-6
    ka *= 0.1
    ks *= 0.1

    # set arbitrary dust mass per hydrogen nucleon, and convert kappa's to cross sections per hydrogen nucleon
    mu = np.zeros_like(w) + 1.5e-29  # arbitrary value, in kg/H
    sa = ka * mu
    ss = ks * mu

    # set all scattering asymmetry values to the (only) published value, i.e. the value for 1 micron;
    # these values won't be used for the benchmark because the Mueller matrix is used instead
    g = np.zeros_like(w) + 0.6296066

    # write stored table
    stab.writeStoredTable(outFilePaths[0], ['lambda'], ['m'], ['log'], [w],
                          ['sigmaabs', 'sigmasca', 'g', 'mu'],
                          ['m2/H', 'm2/H', '1', 'kg/H'],
                          ['log', 'log', 'lin', 'lin'], [sa, ss, g, mu])
Example #18
def convertMeanInterstellarOpticalProps(inFilePaths, outFilePaths):
    # read the input file
    w, a, g, se = np.loadtxt(inFilePaths[0],
                             usecols=(0, 1, 2, 3),
                             skiprows=80,
                             unpack=True)

    # convert units from micron to m and from cm^2/H to m^2/H
    w *= 1e-6
    se *= 1e-4

    # calculate absorption and scattering cross section from total cross section and albedo
    sa = (1 - a) * se
    ss = a * se

    # determine dust mass
    mu = np.zeros_like(w) + 1.870e-29  # in kg/H

    # write stored table -- reverse order because input file has decreasing wavelengths
    stab.writeStoredTable(outFilePaths[0], ['lambda'], ['m'], ['log'],
                          [w[::-1]], ['sigmaabs', 'sigmasca', 'g', 'mu'],
                          ['m2/H', 'm2/H', '1', 'kg/H'],
                          ['log', 'log', 'lin', 'lin'],
                          [sa[::-1], ss[::-1], g[::-1], mu[::-1]])
Example #19
def convertDustemEnthalpies(inFilePaths, outFilePaths):
    for inFilePath, outFilePath in zip(inFilePaths, outFilePaths):
        # read the first two columns of the input file
        logTin, logCin = np.loadtxt(inFilePath,
                                    usecols=(0, 1),
                                    skiprows=11,
                                    unpack=True)

        # interpolate the heat capacity values on a larger grid, to enable accurate integration
        logT = np.linspace(logTin[0], logTin[-1], 6000)
        logC = np.interp(logT, logTin, logCin)

        # integrate the heat capacity over temperature (dT = ln(10) * 10**logT * dlogT on the uniform logT grid)
        h = np.cumsum(np.log(10.) * 10.**(logC + logT) * (logT[1] - logT[0]))

        # convert units from erg/cm3 to J/m3
        h *= 0.1

        # get the actual temperature grid
        T = 10.**logT

        # write stored table
        stab.writeStoredTable(outFilePath, ['T'], ['K'], ['log'], [T], ['h'],
                              ['J/m3'], ['log'], [h])
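
A quick numeric sanity check of the integration scheme above, using a constant, made-up heat capacity for which the integral is known exactly:

# numeric check of the cumulative-sum integration above, with constant heat capacity C = 100 (arbitrary units)
logT = np.linspace(1., 3., 6000)     # T from 10 to 1000
logC = np.full_like(logT, 2.)        # log10(C) = 2 everywhere
h = np.cumsum(np.log(10.) * 10.**(logC + logT) * (logT[1] - logT[0]))
# h[-1] is close to 100 * (1000 - 10) = 99000, up to discretization error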
Example #20
def convertMappingsSEDFamily(inFilePaths, outFilePaths):
    # load the input file
    data = scipy.io.readsav(inFilePaths[0])

    # get the axis grid points (explicitly use float() because some are defined as strings)
    w = np.array(list(map(float,data['wave_micron']))) * 1e-6
    Z = np.array(list(map(float,data['metal']))) * 0.0122
    logC = np.array(list(map(float,data['cparam'])))
    P = 1.3806488e-23 * 1e6 * 10**np.array(list(map(float,data['ponk'])))
    fPDR = np.array((0.,1.))

    # get the luminosities hypercube
    # - convert to W/m: the factor 1e-7 converts erg/s to W and the factor c/lambda^2 converts per Hz to per m
    # - rearrange axes from input order (lambda, fPDR, P, logC, Z) to desired order (lambda, Z, logC, P, fPDR)
    L = np.moveaxis(data['model_spectra'].T * 1e-7 * 2.99792458e8 / w**2, -1, 0)

    # reverse the wavelength axis so that wavelengths are in increasing order
    w = np.flip(w,0)
    L = np.flip(L,0)

    # write stored table
    stab.writeStoredTable(outFilePaths[0],
          ['lambda','Z','logC','P','fPDR'], ['m','1','1','Pa','1'], ['log','log','lin','log','lin'], [w,Z,logC,P,fPDR],
          ['Llambda'], ['W/m'], ['lin'], [L])   # use linear interpolation to make fPDR interpolation work
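
A toy sketch of the axis manipulation above: transposing reverses the axis order of the loaded hypercube, and np.moveaxis then brings the wavelength axis back to the front, producing the (lambda, Z, logC, P, fPDR) ordering expected by the write call (the shapes below are made up):

# toy illustration of the axis reordering used above
X = np.zeros((5, 2, 3, 4, 6))    # hypothetical input order (lambda, fPDR, P, logC, Z)
Y = np.moveaxis(X.T, -1, 0)      # X.T has shape (6, 4, 3, 2, 5); the result has shape (5, 6, 4, 3, 2),
                                 # i.e. the desired order (lambda, Z, logC, P, fPDR)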
Example #21
def convertTextSEDinWattsPerMicron(inFilePaths, outFilePaths):
    for inFilePath, outFilePath in zip(inFilePaths, outFilePaths):
        w, L = np.loadtxt(inFilePath, unpack=True)
        stab.writeStoredTable(outFilePath, ['lambda'], ['m'], ['log'], [w*1e-6],
                                           ['Llambda'], ['W/m'], ['log'], [L*1e6])
Example #22
def convertStokesPolarizedOpticalProps(inFilePaths, outFilePaths):
    # open the input file
    infile = stab.TokenizedFile(open(inFilePaths[0]))

    # skip header lines and read the grid sizes (sizes are given as n-1 in input file)
    for h in range(int(infile.next())):
        infile.skipLine()
    Na = int(infile.next()) + 1
    infile.skipToEndOfLine()
    Nlambda = int(infile.next()) + 1
    infile.skipToEndOfLine()
    Ntheta = int(infile.next()) + 1
    infile.skipToEndOfLine()
    for h in range(3):
        infile.skipLine()

    # allocate arrays
    w = np.zeros(Nlambda)
    a = np.zeros(Na)
    theta = np.zeros(Ntheta)
    Qabs = np.zeros((Na, Nlambda))
    Qsca = np.zeros((Na, Nlambda))
    # g remains zero because it is unused for polarized dust mixes
    g = np.zeros((Na, Nlambda))
    S11 = np.zeros((Na, Nlambda, Ntheta))
    S12 = np.zeros((Na, Nlambda, Ntheta))
    S33 = np.zeros((Na, Nlambda, Ntheta))
    S34 = np.zeros((Na, Nlambda, Ntheta))

    # read the data
    for i in range(Na):
        a[i] = float(infile.next())
        infile.skipToEndOfLine()

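        # fill the wavelength index back to front so that w ends up in increasing order
        # (the input file presumably lists the wavelength blocks in decreasing order)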
        for k in range(Nlambda - 1, -1, -1):
            if infile.next() != '#':
                raise ValueError("Expected line with wavelength column titles")
            infile.skipToEndOfLine()
            w[k] = float(infile.next())
            Qabs[i, k] = float(infile.next())
            Qsca[i, k] = float(infile.next())
            if infile.next() != '#':
                raise ValueError("Expected line with angle column titles")
            infile.skipToEndOfLine()

            for t in range(Ntheta):
                theta[t] = float(infile.next())
                S11[i, k, t] = float(infile.next())
                S12[i, k, t] = float(infile.next())
                S33[i, k, t] = float(infile.next())
                S34[i, k, t] = float(infile.next())

    # convert units
    w *= 1e-6
    a *= 1e-6
    theta *= np.pi / 180.

    # write stored table with absorption and scattering coefficients (plus dummy anisotropy parameter)
    stab.writeStoredTable(outFilePaths[0], ['a', 'lambda'], ['m', 'm'],
                          ['log', 'log'], [a, w], ['Qabs', 'Qsca', 'g'],
                          ['1', '1', '1'], ['log', 'log', 'lin'],
                          [Qabs, Qsca, g])

    # write stored table with Mueller matrix coefficients
    stab.writeStoredTable(outFilePaths[1], ['a', 'lambda', 'theta'],
                          ['m', 'm', 'rad'], ['log', 'log', 'lin'],
                          [a, w, theta], ['S11', 'S12', 'S33', 'S34'],
                          ['1', '1', '1', '1'], ['lin', 'lin', 'lin', 'lin'],
                          [S11, S12, S33, S34])