Example #1
def runGibbs(prior, posterior_object_for_sample, ndim, niters, niters_burn, outdir, snapshot=False, datafname=None, diagnose=False):
    print('-------- BEGINNING BURN-IN CHAIN --------')
    print('burn in iterations: ', niters_burn)

    ndat = posterior_object_for_sample.ndat
    # set up empty D column-stacked latent variable array
    D = []
    for i in range(ndat):
        D.append(np.zeros((1,3)))

    # empty chain for parameter vectors
    chains = []
    loglike = 0
    param = prior   #  start parameter vector as the prior vector
    n_accept_cosmo = 0.0
    n_accept_B = 0.0

    chain_tab = pd.DataFrame(columns=['alpha', 'beta', 'rx', 'rc', 'sigma_res', 'cstar', 'xstar', 'mstar', 'omegam', 'w', 'h'])
    entry_list = []

    # proposal covariance for the cosmological parameters (omegam, w)
    cov_proposal_cosmo = np.array([[5, -0.02],
                                    [-0.02, 1]]) * (1./80) * ((2.38**2)/2)  # estimated from previous inference
    # proposal covariance for the SALT-2 parameters (alpha, beta)
    cov_proposal_B = np.array([[0.02, -0.023],
                                [-0.023, 0.2]]) * (2.38**2 / 2)  # estimated from inference
    
    # We run our initial chains to get an estimate of the covariance between cosmo_params and cov between B_params    
    for iter in range(niters_burn):

        param1,n_accept_cosmo,loglike,log_corr = step_one(posterior_object_for_sample, loglike, param, ndim, cov_proposal_cosmo, n_accept_cosmo)
        param2,n_accept_B,loglike,log_corr = step_two(posterior_object_for_sample, loglike, param1, ndim, cov_proposal_B, n_accept_B)
        D,param3 = step_three(posterior_object_for_sample, D, param2, ndim)               # update D
        param4 = step_four(posterior_object_for_sample, D, param3, ndim) 
        param5 = step_five(posterior_object_for_sample, D, param4, ndim)
        param6 = step_six(posterior_object_for_sample, D, param5, ndim)
        entry_list.append(pd.Series(param6, index=chain_tab.columns))   # move on in sample space
        param = param6

        #print(param)

    #print('proposal estimation acceptance fraction = ', n_accept_cosmo / niters)
    # add to chains dataframe
    chain_tab = pd.concat([chain_tab, pd.DataFrame(entry_list)], ignore_index=True)

    # compute proposal sigmas for C and B:
    omegam = chain_tab['omegam'].values
    w = chain_tab['w'].values
    h = chain_tab['h'].values

    alpha = chain_tab['alpha'].values
    beta = chain_tab['beta'].values

    cosmo_proposal_cov = np.cov(np.vstack([omegam, w]))   # EDIT: just sample omegam, omegade
    B_proposal_cov = np.cov(np.vstack([alpha, beta]))
    
    print('cosmo proposal covariance: ', cosmo_proposal_cov)
    #print('B proposal cov: ', B_proposal_cov)

    print('-------- STARTING ACTUAL SAMPLER CHAIN -------- ')
    #print('gibbs sampling for {} iterations'.format(niters))
    # set up empty D column-stacked latent variable array
    D = []
    for i in range(ndat):
        D.append(np.zeros((1,3)))

    # empty chain for parameter vectors
    chains = []
    loglike = 0
    
    D_chain = [] # for making diagnostic plot for subset of SN1a
    # make prior cube
    cube = gibbs_library.makePriorCube(ndim)
    param = gibbs_library.vanillaPrior(cube)  # start with prior again

    # reset acceptance fraction
    n_accept_cosmo = 0.0
    n_accept_B = 0.0
    true_param = [0.13, 2.56, 
            1.0, 0.1, 0.1, 0., 0., -19.3, 0.3, 0.7, 0.72]
    param[2:8] = true_param[2:8]
    print('starting param', param)
    

    # open write file in case the PBS job gets killed part way:
    fname = outdir + 'post_chains_in_prog.csv'
    with open(fname, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
            
        for iter in range(niters):
            param,n_accept_cosmo,loglike,log_correction = step_one(posterior_object_for_sample, loglike, param, ndim, cosmo_proposal_cov, n_accept_cosmo)
            param,n_accept_B,loglike,log_correction = step_two(posterior_object_for_sample, loglike, param, ndim, cov_proposal=B_proposal_cov, n_accept_B=n_accept_B)
            # steps 3-6 (latent-variable and hyperparameter updates) are switched off in this example
            #D,param = step_three(posterior_object_for_sample, D, param, ndim) # update D
            #param = step_four(posterior_object_for_sample, D, param, ndim)
            #param = step_five(posterior_object_for_sample, D, param, ndim)
            #param = step_six(posterior_object_for_sample, D, param, ndim)
            
            
            param_loglike = param + [loglike, log_correction]
            chains.append(np.asarray(param_loglike))   # move on in sample space
            
            if iter % 10 == 0:
                print('current parameter vector \nand likelihood for iter {}: '.format(iter), param_loglike)
            if diagnose == True:
                # latent vals from chain
                c = D[0::3]
                x1 = D[1::3]
                mB = D[2::3]
                # take all SN1a
                #c = c[::10]
                #x1 = x1[::10]
                #mB = mB[::10]
                D_save = [] # thinned latent vector to be saved
                for i in range(len(c)):
                    D_save.append(c[i])
                    D_save.append(x1[i])
                    D_save.append(mB[i])
                
                D_chain.append(D_save) # add to trace chain
                

            wr.writerow(param_loglike)    # write the parameter step as we go.

            if snapshot == True:
                if iter == niters-1:
                    latent_plots.plot_attributes(D, param, iter)

    print('sampling complete: cosmo param acceptance fraction = ', n_accept_cosmo/niters)
    print('sampling complete: SALT-2 param acceptance fraction = ', n_accept_B/niters)
    fname = outdir + 'post_chains.csv'
    with open(fname, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        for i in chains:
            wr.writerow(i)
    
    if diagnose == True:
        fname = outdir + 'D_latent.csv'
        # save chain output for diagnostic purposes
        with open(fname, 'w') as myfile:
            wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
            for i in D_chain:
                wr.writerow(i)


    return n_accept_cosmo / niters, n_accept_B / niters
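Example #1 tunes its jump proposals adaptively: the burn-in chain is used to estimate the empirical covariance of the (omegam, w) and (alpha, beta) blocks, which then becomes the random-walk proposal covariance for the main chain. Below is a minimal sketch of that adaptation step in isolation, with the 2.38**2/d optimal-scaling factor applied explicitly (Example #1 applies that factor only to the initial hand-set matrices); the helper name and column labels are illustrative, not part of the example.

import numpy as np

def scaled_proposal_cov(burn_in_tab, columns):
    """Estimate a random-walk proposal covariance from burn-in samples
    (a pandas DataFrame such as chain_tab) and apply the 2.38**2 / d
    scaling for a d-dimensional parameter block."""
    samples = burn_in_tab[columns].values.T        # shape (d, n_burn_in)
    d = len(columns)
    return np.cov(samples) * (2.38 ** 2 / d)

# usage against the burn-in table built in Example #1:
# cosmo_proposal_cov = scaled_proposal_cov(chain_tab, ['omegam', 'w'])
# B_proposal_cov = scaled_proposal_cov(chain_tab, ['alpha', 'beta'])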
Example #2
def runGibbs(prior,
             posterior_object_for_sample,
             ndim,
             niters,
             outdir,
             snapshot=False,
             datafname=None,
             diagnose=False):
    ndat = posterior_object_for_sample.ndat
    # set up empty D column-stacked latent variable array
    D = []
    for i in range(ndat):
        D.append(np.zeros((1, 3)))

    # empty chain for parameter vectors
    chains = []
    param = prior  #  start parameter vector as the prior vector

    # evaluate log-likelihood at the original parameter vector
    loglike = posterior_object_for_sample.log_like_selection(param)

    # define acceptance fractions
    n_accept_cosmo = 0.0  # step 1
    n_accept_B = 0.0  # step 2
    n_accept_dstar = 0.0  # step 3
    n_accept_sigmares = 0.0  # step 4
    n_accept_rx = 0.0  # step 5
    n_accept_rc = 0.0  # step 6

    # define jump proposals
    cov_proposal_cosmo = np.array(
        [[0.00862212, 0.01333633], [0.01333633, 0.02501767]]) * (
            (2.38**2) / 2) * 10  # EDIT: just sample omegam, omegade

    cov_proposal_B = np.array([[7.51338227e-05, -7.81496651e-07],
                               [-7.81496651e-07, 7.63401552e-03]
                               ]) * (2.38**2 / 2) * 10

    cov_proposal_p = np.array([[
        2.26136092e-05, 4.55322597e-06, -8.29879371e-06
    ], [
        4.55322597e-06, 2.29976669e-03, 3.91206305e-05
    ], [-8.29879371e-06, 3.91206305e-05, 4.14424789e-04]]) * (2.38**2 / 3) * 10

    cov_proposal_sr = 0.00013154 * (2.38**2) * 10
    cov_proposal_rx = 0.00119617 * (2.38**2) * 10
    cov_proposal_rc = 1.27621525e-05 * (2.38**2) * 10

    print('-------- STARTING ACTUAL SAMPLER CHAIN -------- ')
    print('gibbs sampling for {} iterations'.format(niters))

    D_chain = []  # for making diagnostic plot for subset of SN1a

    true_param = [0.13, 2.56, 1.0, 0.1, 0.1, 0., 0., -19.3, 0.3, 0.7, 0.72]  # reference parameter values (unused in this example)

    # open write file in case the PBS job gets killed part way:
    fname = outdir + 'post_chains_in_prog.csv'
    with open(fname, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)

        for iter in range(niters):

            param, n_accept_cosmo, loglike = step_one(
                posterior_object_for_sample, loglike, param, ndim,
                cov_proposal_cosmo, n_accept_cosmo)
            param, n_accept_B, loglike = step_two(posterior_object_for_sample,
                                                  loglike,
                                                  param,
                                                  ndim,
                                                  cov_proposal=cov_proposal_B,
                                                  n_accept_B=n_accept_B)
            D, param, n_accept_dstar, loglike = step_three(
                posterior_object_for_sample, loglike, D, param, ndim,
                cov_proposal_p, n_accept_dstar)  # update D
            param, n_accept_sigmares, loglike = step_four(
                posterior_object_for_sample, loglike, D, param, ndim,
                cov_proposal_sr, n_accept_sigmares)
            param, n_accept_rx, loglike = step_five(
                posterior_object_for_sample, loglike, D, param, ndim,
                cov_proposal_rx, n_accept_rx)
            param, n_accept_rc, loglike = step_six(posterior_object_for_sample,
                                                   loglike, D, param, ndim,
                                                   cov_proposal_rc,
                                                   n_accept_rc)

            param_loglike = param + [loglike]
            chains.append(np.asarray(param_loglike))  # move on in sample space

            if iter % 1 == 0:
                print(
                    'current parameter vector \nand likelihood for iter {}: '.
                    format(iter), param_loglike)
            if diagnose == True:
                # latent values from chain
                c = D[0::3]
                x1 = D[1::3]
                mB = D[2::3]
                D_save = []  # thinned latent vector to be saved
                for i in range(len(c)):
                    D_save.append(c[i])
                    D_save.append(x1[i])
                    D_save.append(mB[i])

                D_chain.append(D_save)  # add to trace chain

            wr.writerow(param_loglike)  # write the parameter step as we go.

            if snapshot == True:
                if iter == niters - 1:
                    latent_plots.plot_attributes(D, param, iter)

    acc_numbs = [
        n_accept_cosmo, n_accept_B, n_accept_dstar, n_accept_sigmares,
        n_accept_rx, n_accept_rc
    ]
    print('acc numbs: ', acc_numbs)
    acceptance_fracs = [i / niters for i in acc_numbs]

    print('sampling complete')
    for step in range(len(acc_numbs)):
        print('acceptance fraction for step {}: '.format(step + 1),
              acceptance_fracs[step])

    fname = outdir + 'post_chains.csv'
    with open(fname, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        for i in chains:
            wr.writerow(i)

    if diagnose == True:
        fname = outdir + 'D_latent.csv'
        # save chain output for diagnostic purposes
        with open(fname, 'w') as myfile:
            wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
            for i in D_chain:
                wr.writerow(i)

    return acceptance_fracs
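The step_one ... step_six helpers are not listed on this page. For orientation, here is a sketch of what a call such as step_one(posterior, loglike, param, ndim, cov, n_accept) might do, under the assumption that each step is a standard Metropolis-within-Gibbs block update; block_idx and the function name are placeholders, and the real implementations may differ.

import numpy as np

def metropolis_block_update(posterior, loglike, param, block_idx, cov_proposal, n_accept):
    """Jointly propose new values for the parameters at positions block_idx
    and accept or reject them with the standard Metropolis ratio."""
    proposal = list(param)
    current = np.array([param[i] for i in block_idx])
    jump = np.random.multivariate_normal(current, cov_proposal)
    for i, value in zip(block_idx, jump):
        proposal[i] = value

    loglike_prop = posterior.log_like_selection(proposal)    # likelihood call used in Example #2
    if np.log(np.random.rand()) < loglike_prop - loglike:
        return proposal, n_accept + 1, loglike_prop           # accept the jump
    return param, n_accept, loglike                           # reject: stay at the current state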
Example #3
def runGibbs(prior, posterior_object_for_sample, ndim, niters, niters_burn, outdir, snapshot=False, datafname=None, diagnose=False):
    print('-------- BEGINNING BURN-IN CHAIN --------')
    print('burn in iterations: ', niters_burn)

    param = prior   # start parameter vector as the prior vector
    ndat = posterior_object_for_sample.ndat
    # set up empty D column-stacked latent variable array
    D = []
    for i in range(ndat):
        D.append(np.zeros((1,3)))

    # empty chain for parameter vectors
    chains = []

    # compute the log-likelihood at the starting point to initialize sampling
    loglike = posterior_object_for_sample.log_likelihood(param)
    n_accept_cosmo = 0.0
    n_accept_B = 0.0

    chain_tab = pd.DataFrame(columns=['alpha', 'beta', 'rx', 'rc', 'sigma_res', 'cstar', 'xstar', 'mstar', 'omegam', 'w', 'h'])
    entry_list = []

    
    # Estimated from MultiNest runs
    cov_proposal_cosmo = np.array([[0.00862212, 0.01333633],
        [0.01333633, 0.02501767]]) * (2.38**2 / 2) # EDIT: just sample omegam, omegade

    cov_proposal_B = np.array([[ 7.51338227e-05, -7.81496651e-07],
        [-7.81496651e-07,  7.63401552e-03]]) * (2.38**2 / 2)
    
    # We run our initial chains to get an estimate of the covariance between cosmo_params and cov between B_params    
    for iter in range(niters_burn):

        param,n_accept_cosmo,loglike = step_one(posterior_object_for_sample, D, loglike, param, ndim, cov_proposal_cosmo, n_accept_cosmo)
        param,n_accept_B,loglike = step_two(posterior_object_for_sample, D, loglike, param, ndim, cov_proposal_B, n_accept_B)
        D,param = step_three(posterior_object_for_sample, D, param, ndim)               # update D
        param = step_four(posterior_object_for_sample, D, param, ndim) 
        param = step_five(posterior_object_for_sample, D, param, ndim)
        param = step_six(posterior_object_for_sample, D, param, ndim)
        entry_list.append(pd.Series(param, index=chain_tab.columns))   # move on in sample space

        #print(param)

    #print('proposal estimation acceptance fraction = ', n_accept_cosmo / niters)
    # add to chains dataframe
    chain_tab = pd.concat([chain_tab, pd.DataFrame(entry_list)], ignore_index=True)

    # compute proposal sigmas for C and B:
    omegam = chain_tab['omegam'].values
    w = chain_tab['w'].values
    h = chain_tab['h'].values

    alpha = chain_tab['alpha'].values
    beta = chain_tab['beta'].values

    cosmo_proposal_cov = np.cov(np.vstack([omegam, w]))   # EDIT: just sample omegam, omegade
    B_proposal_cov = np.cov(np.vstack([alpha, beta]))
    
    print('cosmo proposal covariance: ', cosmo_proposal_cov)
    #print('B proposal cov: ', B_proposal_cov)

    print('-------- STARTING ACTUAL SAMPLER CHAIN -------- ')
    #print('gibbs sampling for {} iterations'.format(niters))
    # set up empty D column-stacked latent variable array
    D = []
    for i in range(ndat):
        D.append(np.zeros((1,3)))

    # empty chain for parameter vectors
    chains = []
    # reset log-likelihood
    loglike = posterior_object_for_sample.log_likelihood(prior)    
    D_chain = [] # for making diagnostic plot for subset of SN1a


    # reset acceptance fraction
    n_accept_cosmo = 0.0
    n_accept_B = 0.0
    

    # open write file in case the PBS job gets killed part way:
    fname = outdir + 'post_chains_in_prog.csv'
    with open(fname, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
            
        for iter in range(niters):
         
            param,n_accept_cosmo,loglike = step_one(posterior_object_for_sample, D, loglike, param, ndim, cosmo_proposal_cov, n_accept_cosmo)
            param,n_accept_B,loglike = step_two(posterior_object_for_sample, D, loglike, param, ndim, cov_proposal=B_proposal_cov, n_accept_B=n_accept_B)
            D,param = step_three(posterior_object_for_sample, D, param, ndim) # update D
            param = step_four(posterior_object_for_sample, D, param, ndim) 
            param = step_five(posterior_object_for_sample, D, param, ndim)
            param = step_six(posterior_object_for_sample, D, param, ndim)


            param_loglike = param + [loglike]
            chains.append(np.asarray(param_loglike))   # move on in sample space
            
            if iter % 100 == 0:
                print('current parameter vector \nand likelihood for iter {}: '.format(iter), param_loglike)
            if diagnose == True:
                # latent vals from chain
                c = D[0::3]
                x1 = D[1::3]
                mB = D[2::3]
                D_save = [] # thinned latent vector to be saved
                for i in range(len(c)):
                    D_save.append(c[i])
                    D_save.append(x1[i])
                    D_save.append(mB[i])
                
                D_chain.append(D_save) # add to trace chain
                

            wr.writerow(param_loglike)    # write the parameter step as we go.

            if snapshot == True:
                if iter == niters-1:
                    latent_plots.plot_attributes(D, param, iter)

    print('sampling complete: cosmo param acceptance fraction = ', n_accept_cosmo/niters)
    print('sampling complete: SALT-2 param acceptance fraction = ', n_accept_B/niters)
    fname = outdir + 'post_chains.csv'
    with open(fname, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        for i in chains:
            wr.writerow(i)
    fname = outdir + 'D_latent.csv'

    # save chain output for diagnostic purposes
    with open(fname, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        for i in D_chain:
            wr.writerow(i)


    return n_accept_cosmo / niters, n_accept_B / niters
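Each variant writes its chain row by row with csv.QUOTE_ALL so that partial output survives a killed PBS job. A short sketch of reading post_chains.csv back for diagnostics follows; the column ordering is assumed to match the chain_tab columns used above with the log-likelihood appended, so adjust the names if your parameter ordering differs.

import csv
import pandas as pd

cols = ['alpha', 'beta', 'rx', 'rc', 'sigma_res', 'cstar', 'xstar',
        'mstar', 'omegam', 'w', 'h', 'loglike']
chain = pd.read_csv('post_chains.csv', header=None, names=cols, quoting=csv.QUOTE_ALL)

# quick look at the cosmological block and the likelihood trace
print(chain[['omegam', 'w', 'loglike']].describe())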
Example #4
def runGibbs(prior, posterior_object_for_sample, ndim, niters, niters_burn, outdir, snapshot=False, datafname=None, diagnose=False):
    print('-------- BEGINNING BURN-IN CHAIN --------')
    print('burn in iterations: ', niters_burn)

    ndat = posterior_object_for_sample.ndat
    # set up empty D column-stacked latent variable array
    D = []
    for i in range(ndat):
        D.append(np.zeros((1,3)))

    # empty chain for parameter vectors
    chains = []
    param = prior   #  start parameter vector as the prior vector

    # evaluate log-likelihood at the original parameter vector
    loglike = posterior_object_for_sample.log_like_selection(param)

    # define acceptance fractions
    n_accept_cosmo = 0.0      # step 1
    n_accept_B = 0.0          # step 2
    n_accept_dstar = 0.0      # step 3
    n_accept_sigmares = 0.0   # step 4   
    n_accept_rx = 0.0         # step 5
    n_accept_rc = 0.0         # step 6

    chain_tab = pd.DataFrame(columns=['alpha', 'beta', 'rx', 'rc', 'sigma_res', 'cstar', 'xstar', 'mstar', 'omegam', 'w', 'h'])
    entry_list = []

    # proposal covariances for the Metropolis-Hastings updates, estimated from MultiNest runs
    cov_proposal_cosmo = np.array([[0.00862212, 0.01333633],
                                [0.01333633, 0.02501767]]) * ((2.38**2)/2) # EDIT: just sample omegam, omegade

    cov_proposal_B = np.array([[ 7.51338227e-05, -7.81496651e-07],
                             [-7.81496651e-07,  7.63401552e-03]]) * (2.38**2 / 2)
    
    # We run our initial chains to get an estimate of the covariance between cosmo_params and cov between B_params    
    for iter in range(niters_burn):

        param1, n_accept_cosmo, loglike    = step_one(posterior_object_for_sample, loglike, param, ndim, cov_proposal_cosmo, n_accept_cosmo)
        param2, n_accept_B,loglike         = step_two(posterior_object_for_sample, loglike, param1, ndim, cov_proposal_B, n_accept_B)
        D,param3, n_accept_dstar, loglike  = step_three(posterior_object_for_sample, loglike, D, param2, ndim, n_accept_dstar)               # update D
        param4, n_accept_sigmares, loglike = step_four(posterior_object_for_sample, loglike, D, param3, ndim, n_accept_sigmares) 
        param5, n_accept_rx, loglike       = step_five(posterior_object_for_sample, loglike, D, param4, ndim, n_accept_rx)
        param6, n_accept_rc, loglike       = step_six(posterior_object_for_sample, loglike, D, param5, ndim, n_accept_rc)
        
        entry_list.append(pd.Series(param6, index=chain_tab.columns))   # move on in sample space
        param = param6

    # add to chains dataframe
    chain_tab = pd.concat([chain_tab, pd.DataFrame(entry_list)], ignore_index=True)

    # compute proposal sigmas for C and B:
    omegam = chain_tab['omegam'].values
    w = chain_tab['w'].values
    h = chain_tab['h'].values

    alpha = chain_tab['alpha'].values
    beta = chain_tab['beta'].values

    #cosmo_proposal_cov = ((2.38)**2 / 2) * np.cov((chain_tab['omegam'], chain_tab['w']))   # EDIT: just sample omegam, omegade
    #B_proposal_cov = ((2.38)**2 / 2) * np.cov((chain_tab['alpha'], chain_tab['beta']))       

    # estimated from multinest

    cosmo_proposal_cov = np.array([[0.00862212, 0.01333633],
                                [0.01333633, 0.02501767]]) * ((2.38**2)/2) # EDIT: just sample omegam, omegade

    B_proposal_cov = np.array([[ 7.51338227e-05, -7.81496651e-07],
                             [-7.81496651e-07,  7.63401552e-03]]) * (2.38**2 / 2)
    
    print('cosmo proposal covariance: ', cosmo_proposal_cov)

    print('-------- STARTING ACTUAL SAMPLER CHAIN -------- ')
    print('gibbs sampling for {} iterations'.format(niters))


    # empty chain for parameter vectors
    chains = []

    
    D_chain = [] # for making diagnostic plot for subset of SN1a
    # start with prior again
    param = prior
    # re-calculate log-like
    loglike = posterior_object_for_sample.log_like_selection(param)
    # reset acceptance fraction
    n_accept_cosmo = 0.0      # step 1
    n_accept_B = 0.0          # step 2
    n_accept_dstar = 0.0      # step 3
    n_accept_sigmares = 0.0   # step 4   
    n_accept_rx = 0.0         # step 5
    n_accept_rc = 0.0         # step 6


    true_param = [0.13, 2.56, 1.0, 0.1, 0.1, 0., 0., -19.3, 0.3, 0.7, 0.72]  # reference parameter values (unused in this example)
    

    # open write file in case the PBS job gets killed part way:
    fname = outdir + 'post_chains_in_prog.csv'
    with open(fname, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
            
        for iter in range(niters):

            param,n_accept_cosmo,loglike    = step_one(posterior_object_for_sample, loglike, param, ndim, cosmo_proposal_cov, n_accept_cosmo)
            param,n_accept_B,loglike        = step_two(posterior_object_for_sample, loglike, param, ndim, cov_proposal=B_proposal_cov, n_accept_B=n_accept_B)
            D,param,n_accept_dstar,loglike  = step_three(posterior_object_for_sample, loglike, D, param, ndim, n_accept_dstar) # update D
            param,n_accept_sigmares,loglike = step_four(posterior_object_for_sample, loglike, D, param, ndim, n_accept_sigmares) 
            param,n_accept_rx,loglike       = step_five(posterior_object_for_sample, loglike, D, param, ndim, n_accept_rx)
            param,n_accept_rc,loglike       = step_six(posterior_object_for_sample, loglike, D, param, ndim, n_accept_rc)

            param_loglike = param + [loglike]
            chains.append(np.asarray(param_loglike))   # move on in sample space
            
            if iter % 1 == 0:
                print('current parameter vector \nand likelihood for iter {}: '.format(iter), param_loglike)
            if diagnose == True:
                # latent values from chain
                c = D[0::3]
                x1 = D[1::3]
                mB = D[2::3]
                D_save = [] # thinned latent vector to be saved
                for i in range(len(c)):
                    D_save.append(c[i])
                    D_save.append(x1[i])
                    D_save.append(mB[i])
                
                D_chain.append(D_save) # add to trace chain
                
            wr.writerow(param_loglike)    # write the parameter step as we go.

            if snapshot == True:
                if iter == niters-1:
                    latent_plots.plot_attributes(D, param, iter)

    acc_numbs = [n_accept_cosmo, n_accept_B, n_accept_dstar, n_accept_sigmares, n_accept_rx, n_accept_rc]
    print('acc numbs: ', acc_numbs)
    acceptance_fracs = [i / niters for i in acc_numbs]

    print('sampling complete')
    for step in range(len(acc_numbs)):
        print('acceptance fraction for step {}: '.format(step + 1), acceptance_fracs[step])


    fname = outdir + 'post_chains.csv'
    with open(fname, 'w') as myfile:
        wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
        for i in chains:
            wr.writerow(i)
    
    if diagnose == True:
        fname = outdir + 'D_latent.csv'
        # save chain output for diagnostic purposes
        with open(fname, 'w') as myfile:
            wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
            for i in D_chain:
                wr.writerow(i)
   


    return acceptance_fracs
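The per-step acceptance fractions returned by Example #4 are the natural handle for further proposal tuning. Below is a small sketch of a rescaling heuristic aimed at the roughly 0.234 acceptance rate usually targeted for multivariate random-walk Metropolis; the update rule is an assumption for illustration, not something the examples implement.

import numpy as np

def retune_covariance(cov_proposal, acc_frac, target=0.234):
    """Inflate the proposal covariance when acceptance is above target,
    shrink it when acceptance is below target."""
    return np.asarray(cov_proposal) * np.exp(acc_frac - target)

# usage with the list returned by runGibbs (step 1 is the cosmology block):
# cov_proposal_cosmo = retune_covariance(cov_proposal_cosmo, acceptance_fracs[0])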