Example #1
def betas_from_lower_subsample(lam_sub_hi, lam_sub_lo):
    lam_lo, lam_lo_delta = ru.get_lam0(lam_subsample=lam_sub_lo)
    lam_hi, lam_hi_delta = ru.get_lam0(lam_subsample=lam_sub_hi)
    basis_file = BASIS_DIR + "basis_fit_K-%d_V-%d_split-%s.pkl" % \
                 (NUM_BASES, len(lam_lo), SPLIT_TYPE)
    if os.path.exists(basis_file):
        print "grabbing file (%s) from CACHE, optimizing more!"%basis_file
        th, lam0, lam0_delta, parser = qfb.load_basis_fit(basis_file)
        betas_lo = parser.get(th, 'betas')
        betas_hi = np.array([np.interp(lam_hi, lam_lo, beta) for beta in betas_lo])
        return betas_hi
    else:
        return None
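The list comprehension over np.interp is the key step: it carries each coarse-grid basis row onto the finer wavelength grid. A minimal, self-contained sketch of just that step, with toy grids and made-up shapes:

import numpy as np

lam_lo = np.linspace(1000., 2000., 50)        # toy coarse wavelength grid
lam_hi = np.linspace(1000., 2000., 500)       # toy fine wavelength grid
betas_lo = np.random.randn(4, lam_lo.size)    # K=4 basis rows on the coarse grid

# upsample each basis row, exactly as betas_from_lower_subsample does
betas_hi = np.array([np.interp(lam_hi, lam_lo, b) for b in betas_lo])
assert betas_hi.shape == (4, lam_hi.size)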
Example #2
def load_fit_params(num_bases, split_type, lam_subsample, basis_dir=""):
    ### load MLE basis 
    lam0, lam0_delta = ru.get_lam0(lam_subsample=lam_subsample)
    basis_file = os.path.join(basis_dir,
                              qfb.basis_filename(num_bases  = num_bases,
                                                 split_type = split_type,
                                                 lam0       = lam0))
    print "loading fit file: ", basis_file
    th, lam0, lam0_delta, parser = load_basis_fit(basis_file)
    mus    = parser.get(th, 'mus')
    betas  = parser.get(th, 'betas')
    omegas = parser.get(th, 'omegas')
    return mus, betas, omegas, th, lam0, lam0_delta, parser
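A usage sketch for load_fit_params; the argument values mirror the filenames that appear in these examples (K=4, split 'random', subsample 10), while the basis_dir path is a hypothetical cache location:

mus, betas, omegas, th, lam0, lam0_delta, parser = load_fit_params(
    num_bases     = 4,
    split_type    = 'random',
    lam_subsample = 10,
    basis_dir     = 'cache/basis_locked')   # hypothetical directory
# betas:  one row per basis component, one column per entry of lam0
# omegas: one row per training spectrum, one column per basis component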
Example #3
def betas_from_lower_subsample(lam_sub_hi, lam_sub_lo):
    lam_lo, lam_lo_delta = ru.get_lam0(lam_subsample=lam_sub_lo)
    lam_hi, lam_hi_delta = ru.get_lam0(lam_subsample=lam_sub_hi)
    basis_file = BASIS_DIR + "basis_fit_K-%d_V-%d_split-%s.pkl" % \
                 (NUM_BASES, len(lam_lo), SPLIT_TYPE)
    if os.path.exists(basis_file):
        print "grabbing file (%s) from CACHE, optimizing more!" % basis_file
        th, lam0, lam0_delta, parser = qfb.load_basis_fit(basis_file)
        betas_lo = parser.get(th, 'betas')
        betas_hi = np.array(
            [np.interp(lam_hi, lam_lo, beta) for beta in betas_lo])
        return betas_hi
    else:
        return None
Example #4
def load_basis(num_bases, split_type, lam_subsample, basis_dir=""):
    ### load MLE basis 
    lam0, lam0_delta = ru.get_lam0(lam_subsample=lam_subsample)
    basis_file = os.path.join(basis_dir,
                              qfb.basis_filename(num_bases  = num_bases,
                                                 split_type = split_type,
                                                 lam0       = lam0))
    th, lam0, lam0_delta, parser = load_basis_fit(basis_file)
    mus    = parser.get(th, 'mus')
    betas  = parser.get(th, 'betas')
    omegas = parser.get(th, 'omegas')
    W_mle  = np.exp(omegas)
    W_mle /= np.sum(W_mle, axis=1, keepdims=True)
    B_mle  = np.exp(betas)
    B_mle /= np.sum(B_mle * lam0_delta, axis=1, keepdims=True)
    M_mle = np.exp(mus)
    # note: only the normalized basis is returned; W_mle and M_mle are computed but unused here
    return B_mle
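The exp-and-normalize steps above give weights that sum to one over bases and basis rows that integrate to one against the bin widths lam0_delta. A small self-contained sketch of those invariants with toy shapes (not the real fit):

import numpy as np

K, V, N = 4, 1364, 100                     # toy sizes echoing the filenames above
betas  = np.random.randn(K, V)
omegas = np.random.randn(N, K)
lam0_delta = np.full(V, 0.5)               # hypothetical bin widths

W = np.exp(omegas); W /= np.sum(W, axis=1, keepdims=True)
B = np.exp(betas);  B /= np.sum(B * lam0_delta, axis=1, keepdims=True)

assert np.allclose(W.sum(axis=1), 1.0)                 # weights sum to 1 per spectrum
assert np.allclose((B * lam0_delta).sum(axis=1), 1.0)  # each basis integrates to 1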
Example #5
            th = initialize_from_lower_res(th_lo, lam_lo, parser_lo, lam0,
                                           parser)
        else:
            th = np.zeros(parser.N)
            parser.set(th, 'betas',
                       .01 * K_chol.dot(np.random.randn(Vspec, NUM_BASES)).T)
            parser.set(th, 'omegas', .01 * npr.randn(Nspec, NUM_BASES))
            parser.set(th, 'mus', .01 * npr.randn(Nspec))

        # if a basis fit is already in cache, resume from it; the optimizer works in
        # the Cholesky-whitened parameterization, so whiten the cached betas first
        basis_file = "cache/basis_fits/basis_fit_K-%d_V-%d_split-%s.pkl" % (
            NUM_BASES, len(lam0), SPLIT_TYPE)
        if os.path.exists(basis_file):
            print "grabbing file (%s) from CACHE, optimizing more!" % basis_file
            th, lam0, lam0_delta, parser = qfb.load_basis_fit(basis_file)
            betas = parser.get(th, 'betas')
            betas_white = np.linalg.solve(K_chol, betas.T).T
            parser.set(th, 'betas', betas_white)

        betas_from_lo = betas_from_lower_subsample(lam_sub_hi=lam_subsample,
                                                   lam_sub_lo=10)
        if betas_from_lo is not None:
            print "betas from lo??"
            betas_white = np.linalg.solve(K_chol, betas_from_lo.T).T
            parser.set(th, 'betas', betas_white)

        ## make sure loss works
        print "Starting at loss: %2.5g" % (loss_fun(th) + prior_loss(th))

        def full_loss_grad(th, idx=None):
            return loss_grad(th, idx) + prior_loss_grad(th, idx)
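The betas_white = np.linalg.solve(K_chol, betas.T).T line maps the cached betas into the whitened (Cholesky) coordinates the optimizer works in. A minimal round-trip sketch with a toy covariance; every name here is a stand-in, not the real K_chol:

import numpy as np

V, K = 50, 4                                       # toy sizes
A = np.random.randn(V, V)
K_cov  = A.dot(A.T) + V * np.eye(V)                # toy SPD covariance
K_chol = np.linalg.cholesky(K_cov)                 # lower-triangular factor

betas = np.random.randn(K, V)                      # basis rows in natural coordinates
betas_white = np.linalg.solve(K_chol, betas.T).T   # whiten, as in the snippet above
betas_back  = K_chol.dot(betas_white.T).T          # unwhitening recovers the original
assert np.allclose(betas_back, betas)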
Example #6
    mus    = parser.get(th, 'mus')
    betas  = parser.get(th, 'betas')
    omegas = parser.get(th, 'omegas')
    W = np.exp(omegas)
    W = W / np.sum(W, axis=1, keepdims=True)
    B = np.exp(betas)
    B = B / np.sum(B * lam0_delta, axis=1, keepdims=True)
    M = np.exp(mus)
    return W, B, M

#######################################################################
out_dir = "/Users/acm/Dropbox/Proj/astro/DESIMCMC/tex/quasar_z/figs/"
V = 1364   # either 1364 or 2728
bdir = "/Users/acm/Proj/DESIMCMC/experiments/redshift/cache/basis_locked/"
bname = "basis_fit_K-4_V-1364_split-random.pkl"
th, lam0, lam0_delta, parser = load_basis_fit(os.path.join(bdir, bname))
W_mle, B_mle, M_mle = make_params(th, parser, lam0_delta)

# basis for figure
#bfile = "/Users/acm/Dropbox/Proj/astro/DESIMCMC/experiments/redshift/cache/old/basis_th_K-4_V-%d.npy"%V
#f = open(bfile, 'rb')
#lam0, lam0_delta = get_lam0(lam_subsample=5)
#a = np.load(bfile)
#W_mle = np.exp(a[:, :400])
#W_mle = W_mle / np.sum(W_mle, axis=1, keepdims=True)
#B_mle = np.exp(a[:, 400:])
#B_mle = B_mle / np.sum(B_mle * lam0_delta, axis=1, keepdims=True)
#K = B_mle.shape[0]

######################################################################
## PLOT MLE BASIS
## Load in spectroscopically measured quasars + fluxes
##########################################################################
#qso_df  = fitsio.FITS('../../data/DR10QSO/DR10Q_v2.fits')[1].read()
#
# set TEST INDICES 
#npr.seed(13)
#test_idx = npr.permutation(len(qso_df))

##########################################################################
## Load in basis fit (or basis samples)
##########################################################################
### load ML basis from cache (beta and omega values)
basis_cache = 'cache/basis_fit_K-4_V-2728.pkl'
USE_CACHE = True
if os.path.exists(basis_cache) and USE_CACHE:
    th, lam0, lam0_delta, parser = load_basis_fit(basis_cache)
# compute actual weights and basis values (normalized basis + weights)
mus    = parser.get(th, 'mus')
betas  = parser.get(th, 'betas')
omegas = parser.get(th, 'omegas')
W = np.exp(omegas)
W = W / np.sum(W, axis=1, keepdims=True)
B = np.exp(betas)
B = B / np.sum(B * lam0_delta, axis=1, keepdims=True)
M = np.exp(mus)

##############################################################################
## saved quasar-specific files
##############################################################################
qso_sample_files = glob('cache_remote/temper_experiment1/redshift_samples*.npy')
qso_sample_files.sort()
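For reference, the normalized W, B, M computed above presumably combine into model spectra as a scaled mixture of basis rows; a hedged one-line sketch of that reconstruction (the model form is my assumption, not stated in this snippet):

# assuming the model f_n(lam) = M[n] * sum_k W[n, k] * B[k, lam]
recon_spectra = M[:, None] * W.dot(B)   # one row per spectrum, one column per lam0 bin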
Example #7
    p_lower = l_lower * prior_z(z) * prior_gamma(gamma) * prior_m(m_lower)

    l_upper = pixel_likelihood(z, w, m_upper, fluxes, fluxes_ivar, lam0, B)
    p_upper = l_upper * prior_z(z) * prior_gamma(gamma) * prior_m(m_upper)

    grad_m = (p_upper - p_lower) / (2 * delta_m)
    lls.append(grad_m)

    return np.array(lls)


Example #8
### load ML basis from cache (beta and omega values)
basis_cache = 'cache/basis_fit_K-4_V-1364.pkl'
USE_CACHE = True
if os.path.exists(basis_cache) and USE_CACHE:
    th, lam0, lam0_delta, parser = load_basis_fit(basis_cache)

# compute actual weights and basis values (normalized basis + weights)
mus    = parser.get(th, 'mus')
betas  = parser.get(th, 'betas')
omegas = parser.get(th, 'omegas')
W = np.exp(omegas)
W = W / np.sum(W, axis=1, keepdims=True)
B = np.exp(betas)
B = B / np.sum(B * lam0_delta, axis=1, keepdims=True)
M = np.exp(mus)

# load in some quasar fluxes
qso_df        = fitsio.FITS('../../data/DR10QSO/DR10Q_v2.fits')
psf_flux      = qso_df[1]['PSFFLUX'].read()
psf_flux_ivar = qso_df[1]['IVAR_PSFFLUX'].read()
Example #9
        ## initialize basis and weights
        Nspec, Vspec = spectra_resampled.shape
        if lam_idx > 0:
            th = initialize_from_lower_res(th_lo, lam_lo, parser_lo, lam0, parser)
        else: 
            th = np.zeros(parser.N)
            parser.set(th, 'betas', .01 * K_chol.dot(np.random.randn(Vspec, NUM_BASES)).T)
            parser.set(th, 'omegas', .01 * npr.randn(Nspec, NUM_BASES))
            parser.set(th, 'mus', .01 * npr.randn(Nspec))

        # if a basis fit is already in cache, resume from it; the optimizer works in
        # the Cholesky-whitened parameterization, so whiten the cached betas first
        basis_file = "cache/basis_fits/basis_fit_K-%d_V-%d_split-%s.pkl"%(NUM_BASES, len(lam0), SPLIT_TYPE)
        if os.path.exists(basis_file):
            print "grabbing file (%s) from CACHE, optimizing more!"%basis_file
            th, lam0, lam0_delta, parser = qfb.load_basis_fit(basis_file)
            betas = parser.get(th, 'betas')
            betas_white = np.linalg.solve(K_chol, betas.T).T
            parser.set(th, 'betas', betas_white)

        betas_from_lo = betas_from_lower_subsample(lam_sub_hi = lam_subsample, 
                                                   lam_sub_lo = 10)
        if betas_from_lo is not None:
            print "betas from lo??"
            betas_white = np.linalg.solve(K_chol, betas_from_lo.T).T
            parser.set(th, 'betas', betas_white)

        ## make sure loss works
        print "Starting at loss: %2.5g"%(loss_fun(th) + prior_loss(th))
        def full_loss_grad(th, idx=None):
            return loss_grad(th, idx) + prior_loss_grad(th, idx)
Example #10
    betas = parser.get(th, 'betas')
    omegas = parser.get(th, 'omegas')
    W = np.exp(omegas)
    W = W / np.sum(W, axis=1, keepdims=True)
    B = np.exp(betas)
    B = B / np.sum(B * lam0_delta, axis=1, keepdims=True)
    M = np.exp(mus)
    return W, B, M


#######################################################################
out_dir = "/Users/acm/Dropbox/Proj/astro/DESIMCMC/tex/quasar_z/figs/"
V = 1364   # either 1364 or 2728
bdir = "/Users/acm/Proj/DESIMCMC/experiments/redshift/cache/basis_locked/"
bname = "basis_fit_K-4_V-1364_split-random.pkl"
th, lam0, lam0_delta, parser = load_basis_fit(os.path.join(bdir, bname))
W_mle, B_mle, M_mle = make_params(th, parser, lam0_delta)

# basis for figure
#bfile = "/Users/acm/Dropbox/Proj/astro/DESIMCMC/experiments/redshift/cache/old/basis_th_K-4_V-%d.npy"%V
#f = open(bfile, 'rb')
#lam0, lam0_delta = get_lam0(lam_subsample=5)
#a = np.load(bfile)
#W_mle = np.exp(a[:, :400])
#W_mle = W_mle / np.sum(W_mle, axis=1, keepdims=True)
#B_mle = np.exp(a[:, 400:])
#B_mle = B_mle / np.sum(B_mle * lam0_delta, axis=1, keepdims=True)
#K = B_mle.shape[0]

######################################################################
## PLOT MLE BASIS
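The header above is where the fitted basis gets plotted; a hedged sketch of such a figure using the B_mle, lam0, and out_dir loaded earlier in this example (axis labels and the output filename are assumptions):

import matplotlib.pyplot as plt

for k in range(B_mle.shape[0]):
    plt.plot(lam0, B_mle[k], label="basis %d" % k)
plt.xlabel("rest-frame wavelength")
plt.ylabel("normalized basis value")
plt.legend()
plt.savefig(out_dir + "mle_basis.pdf")   # hypothetical filename
plt.show()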
Example #11
    ### load MCMC sample files
    sample_files = glob('cache_remote/photo_experiment0/basis_samples_K-4_V-1364_chain_*.npy')
    chains = {}
    for sfile in sample_files:
        th_samples, ll_samps, lam0, lam0_delta, parser, chain_idx = \
            load_basis_samples(sfile)
        chains[chain_idx] = {'th':th_samples, 'lls':ll_samps, 'parser':parser}

    # visualize LL trace for each chain
    for i in range(len(chains)): 
        plt.plot(chains[i]['lls'][-1000:])
    plt.show()

    ### load MLE basis 
    th, lam0, lam0_delta, parser = load_basis_fit('cache/basis_fit_K-4_V-1364.pkl')
    mus    = parser.get(th, 'mus')
    betas  = parser.get(th, 'betas')
    omegas = parser.get(th, 'omegas')
    W_mle  = np.exp(omegas)
    W_mle /= np.sum(W_mle, axis=1, keepdims=True)
    B_mle  = np.exp(betas)
    B_mle /= np.sum(B_mle * lam0_delta, axis=1, keepdims=True)
    M_mle = np.exp(mus)

    ### load training data
    lam_obs, qtrain, qtest = \
        load_data_clean_split(spec_fits_file = 'quasar_data.fits',
                              Ntrain = 400)
    N = qtrain['spectra'].shape[0]
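A hedged sketch of turning the MCMC draws from one chain into normalized basis draws, assuming each row of th_samples is a flat parameter vector laid out the way parser.get expects above:

    chain = chains[0]
    B_samps = []
    for th_s in chain['th'][-100:]:              # last 100 draws; the count is arbitrary
        betas_s = chain['parser'].get(th_s, 'betas')
        B_s = np.exp(betas_s)
        B_s = B_s / np.sum(B_s * lam0_delta, axis=1, keepdims=True)
        B_samps.append(B_s)
    B_samps = np.array(B_samps)                  # shape: (num draws, K, len(lam0))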
Example #12
    for sfile in sample_files:
        th_samples, ll_samps, lam0, lam0_delta, parser, chain_idx = \
            load_basis_samples(sfile)
        chains[chain_idx] = {
            'th': th_samples,
            'lls': ll_samps,
            'parser': parser
        }

    # visualize LL trace for each chain
    for i in range(len(chains)):
        plt.plot(chains[i]['lls'][-1000:])
    plt.show()

    ### load MLE basis
    th, lam0, lam0_delta, parser = load_basis_fit(
        'cache/basis_fit_K-4_V-1364.pkl')
    mus = parser.get(th, 'mus')
    betas = parser.get(th, 'betas')
    omegas = parser.get(th, 'omegas')
    W_mle = np.exp(omegas)
    W_mle /= np.sum(W_mle, axis=1, keepdims=True)
    B_mle = np.exp(betas)
    B_mle /= np.sum(B_mle * lam0_delta, axis=1, keepdims=True)
    M_mle = np.exp(mus)

    ### load training data
    lam_obs, qtrain, qtest = \
        load_data_clean_split(spec_fits_file = 'quasar_data.fits',
                              Ntrain = 400)
    N = qtrain['spectra'].shape[0]