def age_conversion(z_after, z_now):
    # ages of the Universe at the two redshifts
    # (cosmo is an astropy cosmology instance defined at module level)
    t_now = cosmo.age(z_now)
    t_created = cosmo.age(z_after)
    t_lifetime = t_now - t_created

    # convert from Gyr to Myr
    age = t_lifetime.value * 1.e+3
    return age
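A minimal usage sketch for the function above, assuming the module-level cosmo is an astropy cosmology such as Planck15:

from astropy.cosmology import Planck15 as cosmo

# elapsed time between z=2 and z=1, roughly 2.6e3 Myr for Planck15
print(age_conversion(2.0, 1.0))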
Example #2
def plot_redshift_ax(ax,
                     z=[0.05, 0.5, 1, 2, 3],
                     cosmology=None,
                     method='time'):
    """Add twin axis for redshift to plots versus time

    Args:
        ax (object): Matplotlib axes object
        z (list, optional): List of redshifts. Defaults to [0.05, 0.5, 1, 2, 3].
        cosmology (object, optional): Astropy cosmology object. Defaults to None.
        method (str): Units of x-axis. Defaults to cosmic time [Gyr].
    Returns:
        object: Matplotlib axes object
    """
    if cosmology is None:
        from astropy.cosmology import Planck15 as cosmology

    if method == 'lookback':
        ages = cosmology.lookback_time(z).value
    elif method == 'time':
        ages = cosmology.age(z).value
    elif method == 'log':
        ages = np.log10(1 + np.asarray(z))
    else:
        raise NotImplementedError("method must be 'lookback', 'time' or 'log'")

    ax2 = ax.twiny()
    l, r = ax.get_xlim()
    ax2.set_xlim(l, r)
    ax2.set_xticks(ages)
    ax2.set_xticklabels(z)
    return ax2
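A short usage sketch for plot_redshift_ax; the plotted curve is arbitrary and only illustrates the twin redshift axis:

import numpy as np
import matplotlib.pyplot as plt
from astropy.cosmology import Planck15

fig, ax = plt.subplots()
t = np.linspace(0.5, 13.5, 100)
ax.plot(t, np.log10(t))  # any quantity versus cosmic time [Gyr]
ax.set_xlabel('Cosmic time [Gyr]')
ax2 = plot_redshift_ax(ax, cosmology=Planck15, method='time')
ax2.set_xlabel('Redshift')
plt.show()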
Example #3
def z_at(coalescence_time, num_interp_points=1000):
    # invert the age-redshift relation: tabulate the age of the Universe
    # (in years) on a redshift grid, then interpolate z as a function of age
    zs = np.linspace(0.0, 20.0, num_interp_points)
    age = cosmo.age(zs).value * 1e9  # Gyr -> yr

    check = interpolate.interp1d(age, zs)
    return check(coalescence_time)
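A usage sketch, assuming the module-level cosmo and interpolate are the astropy cosmology and the scipy.interpolate module used throughout these examples:

import numpy as np
from scipy import interpolate
from astropy.cosmology import Planck15 as cosmo

# redshift at which the Universe was 1 Gyr old (roughly z ~ 6 for Planck15)
print(z_at(1.0e9))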
Example #4
    def __init__(self,
                 N=10000,
                 zstart=5,
                 zend=0.1,
                 dt=0.1,
                 fmerge_end=0.5,
                 merger_mass_loss=0.3,
                 bins=np.linspace(6, 12, 25),
                 rf_file='red_frac.pkl'):
        """Initialise new GalPop instance from smf_init."""

        self.N0 = N
        self.ngal = N
        self.bins = bins
        self.dm = bins[1] - bins[0]
        self.lgM = bins[:-1] + 0.5 * self.dm
        self.zstart, self.zend, self.z = zstart, zend, zstart
        self.t = cosmo.age(self.z).value
        self.dt = dt
        tend = cosmo.age(zend).value
        self.nstep = int((tend - self.t) / dt) + 1
        self.nrem = self.nstep
        sfr_pars = sfr_z_pars(self.z)
        s0, M0, gamma = sfr_pars
        M = 10**(util.ran_fun(smf_init, bins[0], bins[-1], N))
        sfr = 10**(s0 - np.log10(1 + (M / M0)**-gamma))

        self.galaxies = [Galaxy(M[i], sfr[i]) for i in range(N)]
        self.phi_init = smf_init(self.lgM) * self.ngal / np.sum(
            smf_init(self.lgM))
        self.nmerge_minor = 0
        self.nmerge_major = 0
        self.merger_mass_loss = merger_mass_loss
        self.ic_mass = 0
        # Normalize merger rate to obtain end merged fraction
        res = scipy.integrate.quad(merger_rate, self.t, tend, args=(1,))
        mint = res[0]
        #        self.m0 = fmerge_end*N/mint/(1 + fmerge_end)
        self.m0 = fmerge_end * N / mint
        #    qf_step = qf_end/nstep
        #    print(nstep, 'time steps')

        # Target quiescent fraction
        (Mfit, rf) = pickle.load(open(rf_file, 'rb'))
        self.qf_end = np.interp(self.lgM, Mfit, rf)
        self.smf_plot(logy=1)
Example #5
    def DE2(self, item, t):
        Mhalo = item[0]
        Mgas = item[1]
        # Mstar = item[2]
        z = z_at_value(cosmo.age, (cosmo.age(0).value - self.t) * u.Gyr,
                       zmin=-1e-6)

        dMh = np.zeros(len(Mhalo))
        sfr = self.SFR(Mgas, z) * self.step * 1e9
        dMg = self.delta_Mgas(dMh, sfr, Mhalo, z)
        dMs = sfr * (1 - self.R)

        return np.array([dMh, dMg, dMs])
Example #6
    def gen_field_analytic(self, p):
        self.logMStar_setup = np.arange(self.logM_min, 12, 0.01)

        phi_init_sf = self.schechter_SMF_func(self.logMStar_setup)
        phi_init_q = np.zeros(len(phi_init_sf))

        t_init = cosmo.lookback_time(self.z_init).value  # Gyr
        t_final = cosmo.lookback_time(self.z_final).value  # Gyr

        t = t_init
        z = self.z_init

        phi_sf = [phi_init_sf]
        phi_q = [phi_init_q]
        z_range = [z]

        integ_an = integration_utils(self.DE_2, hmax=0.1, hmin=1e-5,
                                     htol=1e-9)  #_an == analytic
        condition = True  # Always run at least once
        force = False

        while t >= t_final and condition:
            inits = np.array([phi_init_sf, phi_init_q], copy=True)
            output = integ_an.RK45(p,
                                   inits,
                                   t,
                                   force,
                                   mpp=False,
                                   analytic=True)
            phi_init_sf, phi_init_q = output[0, :], output[1, :]

            phi_sf.append(np.copy(phi_init_sf))
            phi_q.append(np.copy(phi_init_q))

            if (t - integ_an.step) > t_final:
                pass
            else:
                integ_an.step = t - t_final
                force = True
                condition = False

            print(t, integ_an.step)

            t -= integ_an.step
            z = z_at_value(cosmo.age, (cosmo.age(0).value - t) * u.Gyr,
                           zmin=-1e-6)

            z_range.append(z)

        self.phi_sf_interp_an = interp2d(self.logMStar_setup, z_range, phi_sf)
        self.phi_q_interp_an = interp2d(self.logMStar_setup, z_range, phi_q)
Example #7
	def get_sf_qt_mass_lookback_time_bins(self, tnodes, mnodes):
		self.id_lookt_mass = {}
		age_universe = cosmo.age(0).value # 13.797617455819209 Gyr
		znodes = np.array([z_at_value(cosmo.age,(age_universe - i) * u.Gyr) for i in tnodes])

		for iz in range(len(znodes[:-1])):
			for jm in range(len(mnodes[:-1])):
				ind_mt_sf = ((self.table.sfg == 1) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
					(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])))
				ind_mt_qt = ((self.table.sfg == 0) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
					(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])))

				key = ('lookt_' + clean_args('{:.2f}'.format(tnodes[iz])) + '_'
					+ clean_args('{:.2f}'.format(tnodes[iz+1]))
					+ '__m_' + clean_args('{:.2f}'.format(mnodes[jm])) + '_'
					+ clean_args('{:.2f}'.format(mnodes[jm+1])))
				self.id_lookt_mass[key + '_sf'] = self.table.ID[ind_mt_sf].values
				self.id_lookt_mass[key + '_qt'] = self.table.ID[ind_mt_qt].values
Example #8
	def get_subpop_ids(self, znodes, mnodes, pop_dict, linear_mass=1, lookback_time = False):
		self.subpop_ids = {}
		if lookback_time:
			age_universe = cosmo.age(0).value # 13.797617455819209 Gyr
			znodes = np.array([z_at_value(cosmo.age,(age_universe - i) * u.Gyr) for i in znodes])

		for iz in range(len(znodes[:-1])):
			for jm in range(len(mnodes[:-1])):
				for k in pop_dict:
					if linear_mass == 1:
						ind_mz =( (self.table.sfg.values == pop_dict[k][0]) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
							(10**self.table[self.mkey] >= 10**np.min(mnodes[jm:jm+2])) & (10**self.table[self.mkey] < 10**np.max(mnodes[jm:jm+2])) )
					else:
						ind_mz =( (self.table.sfg == pop_dict[k][0]) & (self.table[self.zkey] >= np.min(znodes[iz:iz+2])) & (self.table[self.zkey] < np.max(znodes[iz:iz+2])) &
							(self.table[self.mkey] >= np.min(mnodes[jm:jm+2])) & (self.table[self.mkey] < np.max(mnodes[jm:jm+2])) )

					key = ('z_' + clean_args('{:.2f}'.format(znodes[iz])) + '_'
						+ clean_args('{:.2f}'.format(znodes[iz+1]))
						+ '__m_' + clean_args('{:.2f}'.format(mnodes[jm])) + '_'
						+ clean_args('{:.2f}'.format(mnodes[jm+1])) + '_' + k)
					self.subpop_ids[key] = self.table.ID[ind_mz].values
Example #9
def calc_Q(N=2001,zlow=0,zhigh=20, ap=0.01376, bp=3.26, cp=2.59, dp=5.68):
	global zbuf, tbuf
	z = np.linspace(zhigh,zlow,N)
	if zbuf is None or np.any(zbuf != z):
		t_Gyr = np.array(cosmo.age(z)) # In units of Gyr
		t = t_Gyr*GYR_S # Time t in seconds
		zbuf, tbuf = z, t
	else:
		z, t = zbuf, tbuf
	Q = np.zeros(N)
	dt = np.zeros(N)
	Qprev = np.zeros(N)
	Qdotprev = np.zeros(N)
	# Evolve the differential equation Qdot to solve for Q(z) using Euler's method
	for i in range(1, N):
		Qprev[i] = Q[i-1]
		Qdotprev[i] = Qdot(z[i-1], Qprev[i], ap=ap, bp=bp, cp=cp, dp=dp)
		dt[i] = t[i] - t[i-1]	
		Q[i] = Qprev[i] + dt[i]*Qdotprev[i]
		if Q[i] > 1.0:
			Q[i] = 1.0
	return Q, z
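The Euler update used in calc_Q, shown in isolation: a minimal sketch on the toy equation dQ/dt = -Q (not the module's Qdot), just to illustrate the forward-difference step:

import numpy as np

t = np.linspace(0.0, 5.0, 501)
Q = np.zeros_like(t)
Q[0] = 1.0
for i in range(1, len(t)):
    Q[i] = Q[i - 1] + (t[i] - t[i - 1]) * (-Q[i - 1])  # Q += dt * Qdot
print(Q[-1], np.exp(-5.0))  # Euler estimate vs exact solution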
Example #10
def gen_params(Ngal):
    """Generate the distribution of parameters."""

    print('starting...')
    # Input parameters.
    D = {}
    for key, (low, high) in ranges.items():
        D[key] = np.random.uniform(low, high, size=Ngal)

    # Following:
    # https://github.com/dfm/python-fsps/issues/68
    # I have connected the age and redshift using the cosmology.
    D['tage'] = np.array(Planck15.age(D['zred']))
    D['sf_start'] = np.random.random(Ngal) * D['tage']

    # For consistency, the gas phase metallicity should be set to
    # the same value as for the galaxy (FSPS documentation).
    D['gas_logz'] = D['logzsol']
    print('finished random numbers...')

    df1 = pd.DataFrame(D)

    return df1
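A usage sketch for gen_params; ranges is a module-level dict of (low, high) bounds that the snippet assumes, so the bounds below are illustrative only:

import numpy as np
import pandas as pd
from astropy.cosmology import Planck15

ranges = {'zred': (0.01, 1.0), 'logzsol': (-1.0, 0.2)}  # illustrative bounds
df = gen_params(Ngal=100)
print(df[['zred', 'tage', 'sf_start']].head())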
Example #11
P.rc('ytick', labelsize='medium')
P.rc('axes', labelsize='medium')

pad_zmet = np.array([
    0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0008, 0.001, 0.0012, 0.0016,
    0.0020, 0.0025, 0.0031, 0.0039, 0.0049, 0.0061, 0.0077, 0.0096, 0.012,
    0.015, 0.019, 0.024, 0.03
])
pad_zmetsol = 0.019

pad_solmet = pad_zmet / pad_zmetsol
zmets = (np.linspace(0, 10, 11) * 2 + 1).astype(int)
zsolmets = pad_solmet[(zmets - 1).astype(int)]

#ages = Planck15.age(10**np.linspace(-0.824, -2.268, 25))[-7:-5].value
time_steps = Planck15.age(10**np.linspace(-0.824, -2.268, 15)).value
tqs = np.flip(13.805 - 10**(np.linspace(7, 10.14, 50)) / 1e9, axis=0)
taus = 10**np.linspace(6, 9.778, 50) / 1e9

with np.load(
        '../../data/iteration_number_em_indx_par_pool_mapped.npz') as idxs:
    un, unidx = np.unique(idxs['idx'], return_index=True)

with np.load('../../data/em_indx_par_pool_mapped.npz') as orig_pred:
    pred = orig_pred['lookup'][unidx, 0, :].reshape(len(tqs), len(taus),
                                                    len(time_steps),
                                                    len(zmets), 8)
# with np.load('em_indx_err_par_pool_mapped.npz') as orig_pred_err:
#     pred_err = orig_pred_err['lookuperr'][unidx, :].reshape(len(tqs), len(taus), len(time_steps), len(zmets), 8)

print("interpolating, maybe go grab a drink, we'll be here a while...")
Example #12
def redshift_at_age(t):  # helper name assumed; inverse of age_at_redshift below
    '''
        t in billions of years
        '''
    z = z_at_age_interp(t)
    return z


def age_at_redshift(z):
    '''
        t in billions of years
        '''
    return age_at_z_interp(z)


#interpolation functions to speed up:
redshifts = linspace(0.0, 20, 1000)
redshifts = append(redshifts, linspace(20, 900, 100))

age_at_z = array([Planck15.age(z) / u.Gyr for z in redshifts])
age_at_z_interp = interp1d(redshifts, age_at_z)

epsilon = 0.0005
ages = linspace(epsilon, age_at_redshift(0) - epsilon, 1000)
z_at_ages = array([z_at_value(Planck15.age, age * u.Gyr) for age in ages])
ages = append(ages, age_at_redshift(0))
z_at_ages = append(z_at_ages, 0)
z_at_age_interp = interp1d(ages, z_at_ages)
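# Illustrative round-trip check on the two interpolants above (not part of
# the original module): mapping z -> age -> z should reproduce z.
assert abs(float(z_at_age_interp(age_at_z_interp(2.0))) - 2.0) < 1e-3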


def z_at_formation(z, t_D):
    '''takes the redshift of a source at merger and the delay since formation,
        outputs the redshift at which the binary was formed
        '''
    age = age_at_redshift(z) - t_D
    return redshift_at_age(age)  # closing line assumed: invert age back to z
Example #13
def sample(ndim=3,
           nwalkers=100,
           nsteps=100,
           burnin=500,
           start=[1.0, 13.0, 1.0],
           ha=np.nan,
           e_ha=np.nan,
           oii=np.nan,
           e_oii=np.nan,
           d4000=np.nan,
           e_d4000=np.nan,
           hb=np.nan,
           e_hb=np.nan,
           hdA=np.nan,
           e_hdA=np.nan,
           mgfe=np.nan,
           e_mgfe=np.nan,
           age=Planck15.age(0).value,
           ID=0):
    """ Function to implement the emcee EnsembleSampler function for the sample of galaxies input. Burn in is run and calcualted fir the length specified before the sampler is reset and then run for the length of steps specified. 
        
        :ndim:
        The number of parameters in the model that emcee must find. In this case it always 2 with tq, tau.
        
        :nwalkers:
        The number of walkers that step around the parameter space. Must be an even integer number larger than ndim. 
        
        :nsteps:
        The number of steps to take in the final run of the MCMC sampler. Integer.
        
        :burnin:
        The number of steps to take in the inital burn-in run of the MCMC sampler. Integer. 
        
        :start:
        The positions in the tq and tau parameter space to start for both disc and smooth parameters. An array of shape (1,4).
        
        :age:
        Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr. An array of shape (N,1) or (N,).
        
        :id:
        ID number to specify which galaxy this run is for.
        
        :ra:
        right ascension of source, used for identification purposes
        
        :dec:
        declination of source, used for identification purposes
        
        RETURNS:
        :samples:
        Array of shape (nsteps*nwalkers, 4) containing the positions of the walkers at all steps for all 4 parameters.
        :samples_save:
        Location at which the :samples: array was saved to. 
        
        """

    print('emcee running...')
    p0 = [start + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
    sampler = emcee.EnsembleSampler(nwalkers,
                                    ndim,
                                    lnprob,
                                    threads=4,
                                    args=(ha, e_ha, oii, e_oii, d4000, e_d4000,
                                          hb, e_hb, hdA, e_hdA, mgfe, e_mgfe,
                                          age))
    """ Burn in run here..."""
    pos, prob, state = sampler.run_mcmc(p0, burnin)
    lnp = sampler.flatlnprobability
    np.savez('lnprob_burnin_' + str(ID) + '.npz', lnp=lnp)
    samples = sampler.chain[:, :, :].reshape((-1, ndim))
    samples_save = 'samples_burn_in_' + str(ID) + '.npz'
    np.savez(samples_save, samples=samples)
    sampler.reset()
    print('Burn in complete...')
    """ Main sampler run here..."""
    sampler.run_mcmc(pos, nsteps)
    lnpr = sampler.flatlnprobability
    np.savez('lnprob_run_' + str(ID) + '.npz', lnp=lnpr)
    samples = sampler.chain[:, :, :].reshape((-1, ndim))
    samples_save = 'samples_' + str(ID) + '.npz'
    np.savez(samples_save, samples=samples)
    print('Main emcee run completed.')
    sampler.reset()
    try:
        acor = sampler.get_autocorr_time(c=1)
        return samples, samples_save, sampler.acceptance_fraction, acor
    except AutocorrError:
        return samples, samples_save, sampler.acceptance_fraction, [
            np.nan, np.nan, np.nan
        ]
Example #14
    #ax3.axhline(truth[2], color='r')
    ax1.tick_params(axis='x', labelbottom=False)
    ax2.tick_params(axis='x', labelbottom=False)
    ax3.set_xlabel(r'step number')
    ax1.set_ylabel(r'$Z$')
    ax2.set_ylabel(r'$t_{quench}$')
    ax3.set_ylabel(r'$\tau$')
    P.subplots_adjust(hspace=0.1)
    #save_fig = './test_log_one/walkers_steps_'+str(ID)+'_log.pdf'
    #fig.savefig(save_fig)
    return fig


age = 13.64204

time_steps = Planck15.age(10**np.linspace(-0.824, -2.268,
                                          20)).reshape(-1, 1, 1).value

emlinesdata = np.load('emlines_data.npy')
indx_names = np.load('index_names.npy')

consteml = np.load('emission_line_params_constsfh_3.npy')
constidm = np.load('abs_index_params_constsfh_3.npy')

constews = np.zeros(len(consteml) * len(emlinesdata['name'])).reshape(
    len(consteml), len(emlinesdata['name']))
constidms = np.zeros(len(constidm) * len(indx_names)).reshape(
    len(constidm), len(indx_names))

for n in range(len(consteml)):
    constews[n, :] = consteml[n]['EW']
    constidms[n, :] = constidm[n]['INDX']
Example #15
    import matplotlib.pyplot as plt

    names = ['z', 'm1', 'm2']
    data = np.genfromtxt(
        '../../../simulation/data_ready_june_snap_lim_1.txt', names=True, dtype=None)
    z_coal = data['redshift']

    dz = 0.001
    zs_i = np.arange(0.0, 8.0+dz, dz)
    zs_i_plus_1 = zs_i + dz
    zs = zs_i + dz/2.

    # TODO: check units

    dVc_dz = (cosmo.comoving_volume(zs_i_plus_1).value - cosmo.comoving_volume(zs_i).value)/dz
    dz_dt = dz/(cosmo.age(zs_i_plus_1).value*1e9 - cosmo.age(zs_i).value*1e9)

    weights = np.abs(np.interp(z_coal, zs, dz_dt*dVc_dz/(1+zs)))
    kde = KDEResample(
        np.asarray([data['redshift'], data['mass_new_prev_in'], data['mass_new_prev_out']]).T,
        weights, names)

    kde.make_kernel()

    st = time.time()
    gc = GenerateCatalog(0.8, 100.0, kde, num_catalogs=100000)
    cats = gc.make_catalogs()
    print(time.time() - st)

    plt.hist(
        np.log10(data['mass_new_prev_in']), bins=30, weights=weights, density=True,
        histtype='step')
    plt.show()
Example #16
def set_paper_random_seed():
    import numpy as np
    import astropy.units as u
    from astropy.cosmology import Planck15
    # seed with the age of the Universe in kyr: a fixed, reproducible integer
    seed = int(Planck15.age(z=0) / u.kyr)
    np.random.seed(seed=seed)
Example #17
    def DE_2(self, inits, t):
        phi_sf, phi_q = inits[0, :], inits[1, :]
        z = z_at_value(cosmo.age, (cosmo.age(0).value - t) * u.Gyr, zmin=-1e-6)
        dn_blue = self.dN_blue(self.logMStar_setup, z, phi_sf)
        dn_red = self.dN_red(self.logMStar_setup, z, phi_sf)
        return np.array([dn_blue, dn_red])
Example #18
def calage(filename):
	a = read_all(filename, 'AGE ', 4, 1, np.dtype('float32'))
	z = 1. / a - 1  # scale factor -> redshift
	age = cosmo.age(z).value
	return age
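The scale-factor-to-redshift conversion above, shown in isolation (a self-contained sketch using Planck15 in place of the module-level cosmo):

import numpy as np
from astropy.cosmology import Planck15 as cosmo

a = np.array([0.25, 0.5, 1.0])  # scale factors
z = 1. / a - 1                  # redshifts 3.0, 1.0, 0.0
print(cosmo.age(z).value)       # ages in Gyr: ~2.1, ~5.9, ~13.8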
Example #19
def build_model(objid=1,
                non_param_sfh=False,
                dirichlet_sfh=False,
                add_duste=False,
                add_neb=False,
                add_agn=False,
                switch_off_mix=False,
                marginalize_neb=True,
                n_bins_sfh=8,
                use_eline_prior=False,
                add_jitter=False,
                fit_continuum=False,
                switch_off_phot=False,
                switch_off_spec=False,
                fixed_dust=False,
                **extras):
    """Construct a model.  This method defines a number of parameter
    specification dictionaries and uses them to initialize a
    `models.sedmodel.SedModel` object.
    :param object_redshift:
        If given, given the model redshift to this value.
    :param add_dust: (optional, default: False)
        Switch to add (fixed) parameters relevant for dust emission.
    :param add_neb: (optional, default: False)
        Switch to add (fixed) parameters relevant for nebular emission, and
        turn nebular emission on.
    """
    # read in data table
    obs = build_obs(objid=objid)

    # get SFH template
    if non_param_sfh and not dirichlet_sfh:
        model_params = TemplateLibrary["continuity_sfh"]
    elif dirichlet_sfh:
        model_params = TemplateLibrary["dirichlet_sfh"]
    else:
        model_params = TemplateLibrary["parametric_sfh"]

    # fit for redshift
    # use catalog value as center of the prior
    model_params["zred"]['isfree'] = True
    model_params["zred"]["init"] = obs['redshift']
    model_params["zred"]["prior"] = priors.TopHat(mini=obs['redshift'] - 0.005,
                                                  maxi=obs['redshift'] + 0.005)

    # get SFH template
    if non_param_sfh:
        t_univ = cosmo.age(obs['redshift']).value
        tbinmax = 0.95 * t_univ * 1e9
        lim1, lim2, lim3, lim4 = 7.4772, 8.0, 8.5, 9.0
        agelims = [0, lim1, lim2, lim3] + np.log10(
            np.linspace(10**lim4, tbinmax,
                        n_bins_sfh - 4)).tolist() + [np.log10(t_univ * 1e9)]
        if dirichlet_sfh:
            model_params = adjust_dirichlet_agebins(model_params,
                                                    agelims=agelims)
            model_params["total_mass"]["prior"] = priors.LogUniform(mini=3e9,
                                                                    maxi=1e12)
        else:
            model_params = adjust_continuity_agebins(model_params,
                                                     tuniv=t_univ,
                                                     nbins=n_bins_sfh)
            agebins = np.array([agelims[:-1], agelims[1:]])
            model_params['agebins']['init'] = agebins.T
            model_params["logmass"]["prior"] = priors.TopHat(mini=9.5,
                                                             maxi=12.0)
    else:
        model_params["tau"]["prior"] = priors.LogUniform(mini=1e-1, maxi=10)
        model_params["tage"]["prior"] = priors.TopHat(
            mini=1e-3, maxi=cosmo.age(obs['redshift']).value)
        model_params["mass"]["prior"] = priors.LogUniform(mini=3e9, maxi=1e12)

    # metallicity (no mass-metallicity prior yet!)
    if fixed_dust:
        model_params["logzsol"]["prior"] = priors.ClippedNormal(mini=-1.0,
                                                                maxi=0.19,
                                                                mean=0.0,
                                                                sigma=0.15)
    else:
        model_params["logzsol"]["prior"] = priors.TopHat(mini=-1.0, maxi=0.19)

    # complexify the dust
    if fixed_dust:
        model_params['dust_type']['init'] = 2
        model_params["dust2"]["prior"] = priors.ClippedNormal(mini=0.0,
                                                              maxi=4.0,
                                                              mean=0.3,
                                                              sigma=1)
        model_params['dust1'] = {
            "N": 1,
            "isfree": False,
            "init": 0.0,
            "units": "optical depth towards young stars",
            "prior": None
        }
    else:
        model_params['dust_type']['init'] = 4
        model_params["dust2"]["prior"] = priors.ClippedNormal(mini=0.0,
                                                              maxi=4.0,
                                                              mean=0.3,
                                                              sigma=1)
        model_params["dust_index"] = {
            "N": 1,
            "isfree": True,
            "init": 0.0,
            "units": "power-law multiplication of Calzetti",
            "prior": priors.TopHat(mini=-1.0, maxi=0.4)
        }

        def to_dust1(dust1_fraction=None, dust1=None, dust2=None, **extras):
            return (dust1_fraction * dust2)

        model_params['dust1'] = {
            "N": 1,
            "isfree": False,
            'depends_on': to_dust1,
            "init": 0.0,
            "units": "optical depth towards young stars",
            "prior": None
        }
        model_params['dust1_fraction'] = {
            'N': 1,
            'isfree': True,
            'init': 1.0,
            'prior': priors.ClippedNormal(mini=0.0,
                                          maxi=2.0,
                                          mean=1.0,
                                          sigma=0.3)
        }

    # velocity dispersion
    model_params.update(TemplateLibrary['spectral_smoothing'])
    model_params["sigma_smooth"]["prior"] = priors.TopHat(mini=40.0,
                                                          maxi=400.0)

    # Change the model parameter specifications based on some keyword arguments
    if add_duste:
        # Add dust emission (with fixed dust SED parameters)
        model_params.update(TemplateLibrary["dust_emission"])
        model_params['duste_gamma']['isfree'] = True
        model_params['duste_gamma']['init'] = 0.01
        model_params['duste_gamma']['prior'] = priors.LogUniform(mini=1e-4,
                                                                 maxi=0.1)
        model_params['duste_qpah']['isfree'] = True
        model_params['duste_qpah']['prior'] = priors.TopHat(mini=0.5, maxi=7.0)
        model_params['duste_umin']['isfree'] = True
        model_params['duste_umin']['init'] = 1.0
        model_params['duste_umin']['prior'] = priors.ClippedNormal(mini=0.1,
                                                                   maxi=15.0,
                                                                   mean=2.0,
                                                                   sigma=1.0)

    if add_agn:
        # Allow for the presence of an AGN in the mid-infrared
        model_params.update(TemplateLibrary["agn"])
        model_params['fagn']['isfree'] = True
        model_params['fagn']['prior'] = priors.LogUniform(mini=1e-5, maxi=3.0)
        model_params['agn_tau']['isfree'] = True
        model_params['agn_tau']['prior'] = priors.LogUniform(mini=5.0,
                                                             maxi=150.)

    if add_neb:
        # Add nebular emission
        model_params.update(TemplateLibrary["nebular"])
        model_params['gas_logu']['isfree'] = True
        model_params['gas_logu']['init'] = -2.0
        model_params['gas_logz']['isfree'] = True
        _ = model_params["gas_logz"].pop("depends_on")
        model_params['nebemlineinspec'] = {
            'N': 1,
            'isfree': False,
            'init': False
        }

        if marginalize_neb:
            model_params.update(TemplateLibrary['nebular_marginalization'])
            #model_params.update(TemplateLibrary['fit_eline_redshift'])
            model_params['eline_prior_width']['init'] = 3.0
            model_params['use_eline_prior']['init'] = use_eline_prior

            # only marginalize over a few (strong) emission lines
            if True:
                #SPS_HOME = os.getenv('SPS_HOME')
                #emline_info = np.genfromtxt(SPS_HOME + '/data/emlines_info.dat', dtype=[('wave', 'f8'), ('name', 'S20')], delimiter=',')
                to_fit = [
                    '[OII]3726', '[OII]3729', 'H 3798', 'H 3835', 'H 3889',
                    'H 3970', '[NeIII]3870', 'H delta 4102', 'H gamma 4340',
                    '[OIII]4364', 'H beta 4861', '[OIII]4960', '[OIII]5007',
                    '[NII]6549', 'H alpha 6563', '[NII]6585'
                ]
                #idx = np.array([1 if name in to_fit else 0 for name in emline_info['name']], dtype=bool)
                model_params['lines_to_fit']['init'] = to_fit

            # model_params['use_eline_prior']['init'] = False
        else:
            model_params['nebemlineinspec']['init'] = True

    # This removes the continuum from the spectroscopy. Highly recommend
    # using when modeling both photometry & spectroscopy
    if fit_continuum:
        # order of polynomial that's fit to spectrum
        polyorder_estimate = int(
            np.clip(
                np.round((np.min([7500 * (obs['redshift'] + 1), 9150.0]) -
                          np.max([3525.0 * (obs['redshift'] + 1), 6000.0])) /
                         (obs['redshift'] + 1) * 100), 10, 30))
        model_params['polyorder'] = {
            'N': 1,
            'init': polyorder_estimate,
            'isfree': False
        }
        # fit for normalization of spectrum
        # model_params['spec_norm'] = {'N': 1,
        #                              'init': 0.8,
        #                              'isfree': True,
        #                              'prior': priors.Normal(sigma=0.2, mean=0.8),
        #                              'units': 'f_true/f_obs'}

    # This is a pixel outlier model. It helps to marginalize over
    # poorly modeled noise, such as residual sky lines or
    # even missing absorption lines
    if not switch_off_mix:
        model_params['f_outlier_spec'] = {
            "N": 1,
            "isfree": True,
            "init": 0.01,
            "prior": priors.TopHat(mini=1e-5, maxi=0.5)
        }
        model_params['nsigma_outlier_spec'] = {
            "N": 1,
            "isfree": False,
            "init": 50.0
        }

    # This is a multiplicative noise inflation term. It inflates the noise in
    # all spectroscopic pixels as necessary to get a good fit.
    if add_jitter:
        model_params['spec_jitter'] = {
            "N": 1,
            "isfree": True,
            "init": 1.0,
            "prior": priors.TopHat(mini=1.0, maxi=5.0)
        }

    # Now instantiate the model using this new dictionary of parameter specifications
    model = PolySpecModel(model_params)

    return model
Example #20
    def DE(self, item, t):
        mass, ssfr_params = item[0], item[1]
        z = z_at_value(cosmo.age, (cosmo.age(0).value - t) * u.Gyr, zmin=-1e-6)
        return mass * (self.sSFR(np.log10(mass), z, ssfr_params)) * 1e9
Example #21
def detection_rate_main(num_catalogs,
                        t_obs,
                        duration,
                        fp,
                        evolve_kwargs,
                        kde_key_guide,
                        evolve_class,
                        merger_rate_kwargs,
                        snr_kwargs,
                        only_detectable=True,
                        snr_threshold=8.0,
                        num_repeats=1):

    begin_time = time.time()

    input_data = np.genfromtxt(fp, names=True, dtype=None)

    mbh = evolve_class(**evolve_kwargs)
    mbh.evolve()

    # mergers in hubble time
    inds_keep = np.where(mbh.coalescence_time < cosmo.age(0.0).value * 1e9)[0]

    # merger_rate_kwargs['z_vals'] = mbh.z_coal =
    # np.interp(mbh.coalescence_time[inds_keep], age, zs)
    merger_rate_kwargs['z_vals'] = mbh.z_coal = z_at(
        mbh.coalescence_time[inds_keep])

    # Merger Rate Per Year ####
    mr_class = MergerRate(**merger_rate_kwargs)
    merger_rate = mr_class.merger_rate()
    print('merger rate:', merger_rate)

    # Prepare KDE

    input_to_kde = np.asarray([
        input_data[kde_key_guide['m1']][inds_keep],
        input_data[kde_key_guide['m2']][inds_keep], mbh.z_coal
    ]).T
    kde_weights = mr_class.weights()

    # kde_kwargs = {'names':kde_key_guide.keys(), 'data': input_to_kde, 'weights':kde_weights}

    kde = KDEResample(data=input_to_kde,
                      weights=kde_weights,
                      names=['m1', 'm2', 'z_coal'])
    kde.make_kernel(bound=1e-6)

    output = {}
    for repeat in range(num_repeats):
        # Generate Catalog
        gc = GenerateCatalog(poisson_parameter=merger_rate,
                             duration=duration,
                             binary_kde=kde)
        gc.make_catalogs(num_catalogs=num_catalogs)

        # Find SNRs

        # TODO: REMOVING HIGH MASS RATIOS BECAUSE PHENOMD NOT SUITABLE. CHECK THESE
        inds_keep = np.where(mass_ratio_func(gc.m1, gc.m2) > 1e-4)[0]

        for name in ['catalog_num', 't_event', 'm1', 'm2', 'z_coal']:
            setattr(gc, name, getattr(gc, name)[inds_keep])

        # start and end time of waveform
        st = gc.t_event
        et = 0.0 * ((st - t_obs) < 0.0) + (st - t_obs) * ((st - t_obs) >= 0.0)

        spin = snr_kwargs['spin']
        snr_out = snr(gc.m1, gc.m2, spin, spin, gc.z_coal, st, et,
                      **snr_kwargs)

        names = 'cat,t_event,m1,m2,z_coal,snr,snr_ins,snr_mr'

        if isinstance(snr_kwargs['sensitivity_curves'], str):
            snr_kwargs['sensitivity_curves'] = [
                snr_kwargs['sensitivity_curves']
            ]

        for sc in snr_kwargs['sensitivity_curves']:
            if only_detectable:
                inds_keep = np.where(snr_out[sc + '_wd_all'] > snr_threshold)[0]

            else:
                for sc in snr_kwargs['sensitivity_curves']:
                    inds_keep = np.arange(len(snr_out[sc + '_wd_all']))

            out_list = [
                gc.catalog_num[inds_keep], gc.t_event[inds_keep],
                gc.m1[inds_keep], gc.m2[inds_keep], gc.z_coal[inds_keep]
            ]

            snr_final = [
                snr_out[sc + '_wd_all'][inds_keep],
                snr_out[sc + '_wd_ins'][inds_keep],
                (snr_out[sc + '_wd_mrg'][inds_keep]**2 +
                 snr_out[sc + '_wd_rd'][inds_keep]**2)**(1 / 2)
            ]

            trans = np.core.records.fromarrays(out_list + snr_final,
                                               names=names)
            if repeat == 0:
                output[sc] = trans
            else:
                output[sc] = np.concatenate([output[sc], trans])
        print(repeat + 1, 'out of', num_repeats)
    print('Total Duration:', time.time() - begin_time)
    return output
Example #22
from hoki import load
import numpy as np
from glob import glob
from spectacle.h5py_utils import write_data_h5py
from astropy.cosmology import Planck15 as cosmo
from astropy.cosmology import z_at_value
import astropy.units as u

model_dir = 'BPASSv2.2.1_sin-imf_chab300'
fname = 'output/bpass_sin.h5'

models = glob(model_dir + '/*')

output_temp = load.model_output(models[0])

print(cosmo.age(0).value)
ages = np.array([float(a) for a in output_temp.columns[1:]])
ages_Gyr = 10**ages / 1e9  # Gyr
age_mask = ages_Gyr < cosmo.age(0).value - 0.4  # Gyr
ages = ages[age_mask]
ages_Gyr = ages_Gyr[age_mask]

scale_factors = cosmo.scale_factor(
    [z_at_value(cosmo.lookback_time, age * u.Gyr) for age in ages_Gyr])

wl = output_temp['WL'].values
metallicities = np.array([None] * len(models))

spec = np.zeros((len(metallicities), len(ages), len(wl)))

for i, mod in enumerate(models):
Example #23
def snitch(ha,
           e_ha,
           d4000,
           e_d4000,
           hb,
           e_hb,
           hdA,
           e_hdA,
           mgfe,
           e_mgfe,
           redshift,
           ident,
           opstart=[1.0, -1.0, 1.0]):

    age = Planck15.age(redshift).value
    nll = lambda *args: -lnprobability(*args)

    nwalkers = 100  # number of monte carlo chains
    nsteps = 200  # number of steps in the monte carlo chain
    burnin = 1000  # number of steps in the burn in phase of the monte carlo chain
    ndim = 3  # number of dimensions in the SFH model

    # idx = np.searchsorted(time_steps.flatten(), age)
    # if idx == len(time_steps.flatten()):
    #     idx = len(time_steps.flatten())-1
    # else:
    #     pass
    # # newmasked_sp = masked_sp[idx,:,:,:,:]
    # # func = interpolate.RegularGridInterpolator((tqs, np.log10(taus), zsolmets), newmasked_sp, method='linear', bounds_error=False, fill_value=np.nan)
    # newsv = sv[30000*(idx):30000*(idx+1)][:,1:]
    # newmasked_sp = masked_sp[idx,:,:,:,:]
    # func = interpolate.NearestNDInterpolator(newsv, newmasked_sp.reshape(-1, 9))
    func = np.sin  # placeholder for the commented-out interpolator above

    result_bh = basinhopping(nll,
                             opstart,
                             minimizer_kwargs={
                                 "args": (ha, e_ha, d4000, e_d4000, hb, e_hb,
                                          hdA, e_hdA, mgfe, e_mgfe, age, func),
                                 "method":
                                 'Nelder-Mead'
                             })
    print(result_bh)
    if "successfully" in result_bh.message[0]:
        start = result_bh['x']
    else:
        start = np.array(opstart)

    #The rest of this file calls the emcee module which is initialised in the sample function of the posterior file.
    samples = sample(path=os.getcwd(),
                     ndim=ndim,
                     nwalkers=nwalkers,
                     nsteps=nsteps,
                     burnin=burnin,
                     start=start,
                     ha=ha,
                     e_ha=e_ha,
                     d4000=d4000,
                     e_d4000=e_d4000,
                     hb=hb,
                     e_hb=e_hb,
                     hdA=hdA,
                     e_hdA=e_hdA,
                     mgfe=mgfe,
                     e_mgfe=e_mgfe,
                     age=age,
                     ID=ident)

    # This section of the code prunes the walker positions returned by emcee to remove those stuck in local minima. We follow the method
    # outlined in Hou et al. (2012).
    with np.load('lnprob_run_' + str(ident) + '.npz') as lnp:
        lk = np.mean(lnp['lnp'].reshape(nwalkers, nsteps), axis=1)
        idxs = np.argsort(-lk)
        slk = -lk[idxs]
        cluster_idx = np.argmax(
            np.diff(slk) > 10000 * np.diff(slk)[0] /
            (np.linspace(1,
                         len(slk) - 1,
                         len(slk) - 1) - 1)) + 1
        if cluster_idx > 1:
            #lnps = slk[:cluster_idx]
            samples = samples.reshape(
                nwalkers, nsteps,
                ndim)[idxs, :, :][:cluster_idx, :, :].reshape(-1, ndim)
        else:
            pass
        lnp.close()
        del lnp, lk, idxs, slk, cluster_idx

    dtq_mcmc, log_tau_mcmc, Z_mcmc, = map(
        lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
        zip(*np.percentile(samples, [16, 50, 84], axis=0)))

    # Save the inferred SFH parameters. In each case the elements are [best fit value, plus uncertainty,  minus uncertainty].
    # Note that the log tau values are recorded.
    np.save('inferred_SFH_parameters_ID_' + str(ident) + '.npy',
            [dtq_mcmc, log_tau_mcmc, Z_mcmc])

    # Produce the emcee corner plot showing which part of the parameter space the walkers explored.

    try:
        fig = corner.corner(samples,
                            labels=[r'$\delta t_q$', r'$\log_{10}\tau$', r'Z'],
                            quantiles=([0.16, 0.5, 0.84]))
        fig.savefig('snitch_output_corner_' + str(ident) + '.pdf')
        plt.close(fig)
    except (ValueError):
        pass

    ### The lines below produce the walker positions with each step for the burn in phase and the rest of the run.
    ### Uncomment this section if you'd like these produced.

    try:
        fig = walker_plot(samples, nwalkers, ndim, -1,
                          [dtq_mcmc[0], log_tau_mcmc[0], Z_mcmc[0]])
        fig.tight_layout()
        fig.savefig('walkers_steps_with_pruning_' + str(ident) + '.pdf')
        plt.close(fig)
    except (ValueError):
        pass

    with np.load('samples_burn_in_' + str(ident) + '.npz') as burninload:
        try:
            fig = walker_plot(burninload['samples'], nwalkers, ndim, -1,
                              [dtq_mcmc[0], log_tau_mcmc[0], Z_mcmc[0]])
            fig.tight_layout()
            fig.savefig('walkers_steps_burn_in_without_pruning_' + str(ident) +
                        '.pdf')
            plt.close(fig)
        except (ValueError):
            pass
        burninload.close()

    plt.close('all')

    # Print out the best fit values. Note that the actual value of tau in Gyr is printed, not the log value.

    print(
        r'Best fit Z value (3.s.f.) found by SNITCH for', ident,
        'input parameters are : [ {0:1.3f}, +{1:1.3f}, -{2:1.3f} ]'.format(
            Z_mcmc[0], Z_mcmc[1], Z_mcmc[2]))
    print(
        r'Best fit dt_q value (3.s.f.) found by SNITCH for', ident,
        'input parameters are : [ {0:1.3f}, +{1:1.3f}, -{2:1.3f} ]'.format(
            dtq_mcmc[0], dtq_mcmc[1], dtq_mcmc[2]))
    print(
        r'Best fit tau value (3.s.f.) found by SNITCH for', ident,
        'input parameters are : [ {0:1.3f}, +{1:1.3f}, -{2:1.3f} ]'.format(
            10**log_tau_mcmc[0],
            10**(log_tau_mcmc[1] + log_tau_mcmc[0]) - 10**log_tau_mcmc[0],
            10**log_tau_mcmc[0] - 10**(log_tau_mcmc[0] - log_tau_mcmc[2])))
    return (map(lambda v: (v[1], v[2] - v[1], v[1] - v[0]),
                zip(*np.percentile(samples, [16, 50, 84], axis=0))))
Example #24
mpl6agn.add_columns([Z_mcmc, Z_p_mcmc, Z_m_mcmc, tq_mcmc, tq_p_mcmc, tq_m_mcmc, tau_mcmc, tau_p_mcmc, tau_m_mcmc])

nll = lambda *args: -lnprob(*args)
from scipy.optimize import minimize, basinhopping

import resource
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (10000, hard))

nwalkers = 100 # number of monte carlo chains
nsteps= 100 # number of steps in the monte carlo chain
opstart = [0.7, 9.0, 1.25] # starting place of all the chains
burnin = 500 # number of steps in the burn in phase of the monte carlo chain
ndim = 3

ages = Planck15.age(mpl6agn['nsa_z']).value

define_em_db = SpectralFeatureDBDef(key='USEREM',
                              file_path='elpsnitch.par')
emlines  = EmissionLineDB(u"USEREM", emldb_list=define_em_db)
define_abs_db = SpectralFeatureDBDef(key='USERABS',
                              file_path='extindxsnitch.par')
abs_db = AbsorptionIndexDB(u"USERABS", indxdb_list=define_abs_db)
band_db = BandheadIndexDB(u"BHBASIC")
indx_names = np.hstack([abs_db.data["name"], band_db.data["name"]])

for n in trange(len(mpl6agn)):
    if os.path.isfile('../data/manga-'+str(mpl6agn[n]['plate'])+'-'+str(mpl6agn[n]['ifudsgn'].strip())+'-LOGCUBE.fits.gz'):
    #     pass
    # else: 
    #     r = requests.get(top_level_url+str(mpl6agn[n]['plate'])+'/stack/manga-'+str(mpl6agn[n]['plate'])+'-'+str(mpl6agn[n]['ifudsgn'].strip())+'-LOGCUBE.fits.gz', auth=HTTPBasicAuth(up[0].rstrip('\n'), up[1].rstrip('\n')), stream=True)
Example #25
    gamma = 0.316 + (1.319*(a-1) + 0.279*z)*v
    delta = 3.508 + (2.608*(a-1)+(-0.043)*z)*v
    return -np.log10(np.power(10,alpha*x) + 1) + delta * np.power(np.log10(1+np.exp(x)), gamma) / (1 + np.exp(np.power(10,-x)))

#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#


fig1,ax1 = plt.subplots(tight_layout=True)
fig2,ax2 = plt.subplots(tight_layout=True)
for M0 in M0_range:
    Mg = 0
    Ms = 0
    inits = np.array([M0, Mg, Ms])
    
    t_init  = cosmo.age(z_init).value
    t_final = cosmo.age(z_final).value
    t       = t_init
    dt      = 0.01
    
    print(np.log10(M0))
    
    while t < t_final:
        z = z_at_value(cosmo.age,t*u.Gyr, zmin=-1e-6)
        
        dMh = delta_Mhalo(inits[0],z) *dt*1e9
        
        print(np.log10(dMh))
        
        sfr = SFR(inits[1], z)        *dt*1e9
        dMg = delta_Mgas(dMh, sfr, inits[0],z) #   *dt*1e9
Example #26
    def update_step(self):
        self.t -= self.step
        self.z = z_at_value(cosmo.age, (cosmo.age(0).value - self.t) * u.Gyr,
                            zmin=-1e-6)
Example #27
#
######################################################################################################

pad_zmet = np.array([
    0.0002, 0.0003, 0.0004, 0.0005, 0.0006, 0.0008, 0.001, 0.0012, 0.0016,
    0.0020, 0.0025, 0.0031, 0.0039, 0.0049, 0.0061, 0.0077, 0.0096, 0.012,
    0.015, 0.019, 0.024, 0.03
])
pad_zmetsol = 0.019

pad_solmet = pad_zmet / pad_zmetsol
zmets = np.append((np.linspace(0, 10, 11) * 2 + 1).astype(int), [22.])
zsolmets = pad_solmet[(zmets - 1).astype(int)]

#ages = Planck15.age(10**np.linspace(-0.824, -2.268, 25))[-7:-5].value
time_steps = Planck15.age(10**np.linspace(-0.824, -3.295,
                                          15)).reshape(-1, 1, 1).value
taus = 10**np.linspace(6, 9.778, 50) / 1e9

with np.load(
        '/Users/smethurst/Projects/mangaagn/snitch/snitch/emls_par_pool_mapped_nozshift_ppxfcorrect_AA_12zmet.npz'
) as orig_pred:
    pred = orig_pred['lookup']

with np.load(
        '/Users/smethurst/Projects/mangaagn/snitch/snitch/emls_mask_par_pool_mapped_nozshift_ppxfcorrect_AA_12zmet.npz'
) as orig_mask:
    mask = orig_mask['lookupmask']

tqs = np.append(np.flip(
    time_steps.flatten()[0] -
    10**(np.linspace(7, np.log10(
Example #28
def age_at_redshift(z):
    '''
        Age of the Universe at redshift z, in billions of years (Gyr)
        '''
    t = Planck15.age(z) / u.Gyr
    return t
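A brief usage sketch (Planck15 and astropy.units imported as the example assumes):

import astropy.units as u
from astropy.cosmology import Planck15

print(age_at_redshift(0))    # ~13.8 Gyr
print(age_at_redshift(1.0))  # ~5.9 Gyr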
Example #29
def sample(path,
           ndim=3,
           nwalkers=100,
           nsteps=100,
           burnin=500,
           start=[1.0, -0.7, 1.0],
           ha=np.nan,
           e_ha=np.nan,
           d4000=np.nan,
           e_d4000=np.nan,
           hb=np.nan,
           e_hb=np.nan,
           hdA=np.nan,
           e_hdA=np.nan,
           mgfe=np.nan,
           e_mgfe=np.nan,
           age=Planck15.age(0).value,
           ID=0):
    """ 
    Function to implement the emcee EnsembleSampler for the input sample of galaxies. A burn-in phase of the specified length is run and saved before the sampler is reset and then run for the number of steps specified.
        
    INPUTS
        :path:
        Directory path which output should be saved to 

        :ndim:
        The number of parameters in the model that emcee must find. In the default expsfh case it is always 3: Z, tq, tau.
        
        :nwalkers:
        The number of walkers that step around the parameter space. Must be an even integer number larger than ndim. 
        
        :nsteps:
        The number of steps to take in the final run of the MCMC sampler. Integer.
        
        :burnin:
        The number of steps to take in the initial burn-in run of the MCMC sampler. Integer.
        
        :start:
        The positions in the Z, tq and tau parameter space to start. An array of shape (1,3).
         
        :ha, d4000, hb, hdA, mgfe:
        Spectral parameter measurements for the spectra the user is trying to fit a SFH to; note that there are 5 measurements.
        The names do not have to match, but the order has to match whatever is returned from the lookup table.

        :e_ha, e_d4000, e_hb, e_hdA, e_mgfe:
        Same as above but for the measurement error on each spectral parameter. 

        :age:
        Observed age of a galaxy, often calculated from the redshift i.e. at z=0.1 the age ~ 12.5. Must be in units of Gyr. An array of shape (1,).
        
        :id:
        ID number to specify which galaxy this run is for.
        
    RETURNS:
        :samples:
        Array of shape (nsteps*nwalkers, ndim) containing the positions of the walkers at all steps for each SFH parameter in ndim.
        
        """

    print('emcee running...')
    p0 = [start + 1e-4 * np.random.randn(ndim) for i in range(nwalkers)]
    # idx = np.searchsorted(time_steps.flatten(), age)
    # if idx == len(time_steps.flatten()):
    #     idx = len(time_steps.flatten())-1
    # else:
    #     pass
    func = np.sin  # placeholder for the commented-out interpolator below
    # # newmasked_sp = masked_sp[idx,:,:,:,:]
    # # func = interpolate.RegularGridInterpolator((tqs, np.log10(taus), zsolmets), newmasked_sp, method='linear', bounds_error=False, fill_value=np.nan)
    # newsv = sv[30000*(idx):30000*(idx+1)][:,1:]
    # newmasked_sp = masked_sp[idx,:,:,:,:]
    # #func = interpolate.NearestNDInterpolator(newsv, newmasked_sp.reshape(-1, 9))
    # func = interpolate.NearestNDInterpolator(newsv, newmasked_sp.reshape(-1, 9))
    #func = interpolate.RegularGridInterpolator((tqs, np.log10(taus), zsolmets), newmasked_sp, method='linear', bounds_error=False, fill_value=np.nan)
    #gp_hodlr = george.GP(ker, solver=george.HODLRSolver, seed=42)
    #gp_hodlr.compute(sv[30000*(idx-1):30000*(idx+1)])
    sampler = emcee.EnsembleSampler(nwalkers,
                                    ndim,
                                    lnprobability,
                                    args=(ha, e_ha, d4000, e_d4000, hb, e_hb,
                                          hdA, e_hdA, mgfe, e_mgfe, age, func))
    """ Burn in run here..."""
    pos, prob, state = sampler.run_mcmc(p0, burnin)
    lnp = sampler.flatlnprobability
    np.savez(path + '/lnprob_burnin_' + str(ID) + '.npz', lnp=lnp)
    samples = sampler.chain[:, :, :].reshape((-1, ndim))
    samples_save = path + '/samples_burn_in_' + str(ID) + '.npz'
    np.savez(samples_save, samples=samples)
    sampler.reset()
    print('Burn in complete...')
    """ Main sampler run here..."""
    sampler.run_mcmc(pos, nsteps)
    lnpr = sampler.flatlnprobability
    np.savez(path + '/lnprob_run_' + str(ID) + '.npz', lnp=lnpr)
    samples = sampler.chain[:, :, :].reshape((-1, ndim))
    samples_save = path + '/samples_' + str(ID) + '.npz'
    np.savez(samples_save, samples=samples)
    print('Main emcee run completed.')
    sampler.reset()

    return samples