# bolton@utah@iac 2014mayo

# Assumed imports: os and numpy are clearly required by the code below;
# the io, pxs, and a2v modules (ndArch reader, pixel-boundary utilities,
# and air-to-vacuum conversion) are project-specific and their import
# paths are not shown in this fragment.
import os
import numpy as n

# Set this:
# export CAP_DATA_DIR=/data/CAP/bad

# Set the filename:
# Need to loop over 1--5 somehow...
fname = os.getenv('CAP_DATA_DIR') + '/ndArch-nsc5-bad00.fits'
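
# A sketch of the loop the comment above anticipates (left commented so
# the single-file run below is unchanged); mapping the 1--5 index onto
# the two-digit 'badNN' filename suffix is my assumption:
# for i in range(1, 6):
#     fname = os.getenv('CAP_DATA_DIR') + '/ndArch-nsc5-bad%02d.fits' % i
#     # ... repeat the processing below for each file ...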

# Get the data:
data, baselines, infodict = io.read_ndArch(fname)

# Build the log-lambda baseline and pixel boundaries:
hires_loglam_air = infodict['coeff0'] + infodict['coeff1'] * n.arange(infodict['nwave'])
hires_logbound_air = pxs.cen2bound(hires_loglam_air)
hires_wave_air = 10.**hires_loglam_air
hires_wavebound_air = 10.**hires_logbound_air
hires_dwave_air = hires_wavebound_air[1:] - hires_wavebound_air[:-1]
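
# For reference, a minimal stand-in for what cen2bound is assumed to do
# (midpoints between adjacent centers, with a half-pixel extrapolation
# at each end); the real pxs.cen2bound may differ in detail:
def _cen2bound_sketch(cen):
    mid = 0.5 * (cen[1:] + cen[:-1])
    lo = cen[0] - (mid[0] - cen[0])
    hi = cen[-1] + (cen[-1] - mid[-1])
    return n.concatenate(([lo], mid, [hi]))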

# Transform the baselines to vacuum wavelengths:
hires_wave = a2v.a2v(hires_wave_air)
hires_wavebound = a2v.a2v(hires_wavebound_air)
hires_dwave = hires_wavebound[1:] - hires_wavebound[:-1]
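
# For reference, a common air-to-vacuum conversion is the Ciddor (1996)
# refraction formula used by the IDL astrolib AIRTOVAC routine (quoted
# as valid above ~2000 Angstroms); whether a2v.a2v implements exactly
# this is an assumption:
def _air_to_vac_sketch(wave_air):
    sigma2 = (1.0e4 / wave_air)**2
    fact = (1.0 + 5.792105e-2 / (238.0185 - sigma2)
            + 1.67917e-3 / (57.362 - sigma2))
    return wave_air * fact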

# Not sure what the interpretation is at wavelengths below 2000ang...
# We will probably cut that out for now anyway, since these are the
# first-pass redshift templates that we are building.
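
# A sketch of that cut, if and when we apply it (2000 Angstroms is the
# threshold named above; wavelength as the trailing axis of 'data' is
# my assumption about the ndArch layout):
# keep = hires_wave >= 2000.
# data = data[..., keep]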

# Now, we want a flattened view (not copy) of the hi-res data:
npars = data.size // infodict['nwave']
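
# The flattened view itself, sketched here; reshape returns a view for
# a C-contiguous array (a copy otherwise), and wavelength as the
# trailing axis is again my assumption about the ndArch layout:
flatdata = data.reshape(npars, infodict['nwave'])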

# (ssp_flam, ssp_agegyr, and ssp_wave below are assumed to have been
# loaded in an earlier step that is not shown in this fragment.)

# Exponent to divide out:
exp_div = 12
ssp_flam /= 10.**exp_div

# Get a subset of the templates:
izero = n.argmin(n.abs(n.log10(ssp_agegyr) - (-2.5)))
idx = 10 * n.arange(15) + izero
# n.log10(ssp_agegyr[idx])   # (interactive check of the selected log-ages)
nsub_age = len(idx)
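
# Applying the subset to the fluxes would look something like this,
# assuming age is the leading axis of ssp_flam (axis ordering is not
# shown in this fragment):
# ssp_flam_sub = ssp_flam[idx]
# ssp_agegyr_sub = ssp_agegyr[idx]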

# Work out the pixel boundaries and widths
# (we go to log-lambda because the
# spacing is uniform in that):
ssp_loglam = n.log10(ssp_wave)
ssp_logbound = pxs.cen2bound(ssp_loglam)
ssp_wavebound = 10.**ssp_logbound
#ssp_dwave = ssp_wavebound[1:] - ssp_wavebound[:-1]
# Note that there is some slight non-uniformity in the width of the
# sampling intervals.  That is probably fine in itself, but if we take
# the resolution straight from the pixel widths, it will imply a
# slightly variable resolution in the data as it currently stands.
# So instead, let's set it from the average loglam spacing, which is
# essentially uniform.
# p.plot(ssp_logbound[1:] - ssp_logbound[:-1], hold=False)
dloglam = n.mean(ssp_logbound[1:] - ssp_logbound[:-1])

# This will work:
#ssp_dwave = 10.**(ssp_loglam + 0.5 * dloglam) - \
#            10.**(ssp_loglam - 0.5 * dloglam)
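
# An active version of the commented-out computation just above,
# sketched under a new name so the original lines stay as written;
# these are the pixel widths implied by the uniform dloglam grid:
ssp_dwave_uniform = (10.**(ssp_loglam + 0.5 * dloglam)
                     - 10.**(ssp_loglam - 0.5 * dloglam))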