def create_model(model_name, config=None, *args, **kwargs):
    """Instantiate the model registered under `model_name`.

    The model's class path is read from the "type" entry of its model
    config; the class is imported dynamically and constructed with the
    given name and config plus any extra arguments.
    """
    if config is None:
        config = get_config()
    class_path = get_model_config(model_name, config=config)["type"]
    model_cls = load_module(class_path)
    # Positional *args precede the keyword arguments exactly as before.
    return model_cls(*args, model_name=model_name, config=config, **kwargs)
def load_injections(config=None, logger=None):
    """Read the injection catalogue CSV and return it as a DataFrame."""
    config = get_config() if config is None else config
    logger = get_logger() if logger is None else logger
    injections_dir = os.path.join(
        config["directories"]["data"], "input", "injections")
    catalogue_path = os.path.join(
        injections_dir, config["injections"]["catalogue_filename"])
    df = pd.read_csv(catalogue_path)
    logger.debug(f"Loaded {df.shape[0]} injections from file.")
    return df
def load_gama_specobj(config=None, logger=None):
    """Load the GAMA spectroscopic object catalogue.

    Adds lower-case `ra`/`dec` columns copied from the catalogue's
    upper-case `RA`/`DEC` columns so downstream matching code can use a
    uniform naming convention.
    """
    config = get_config() if config is None else config
    logger = get_logger() if logger is None else logger
    catalogue_path = os.path.join(
        config["directories"]["data"], "input", "gama_specobj.csv")
    df = pd.read_csv(catalogue_path)
    df["ra"] = df["RA"]
    df["dec"] = df["DEC"]
    logger.debug(f"Loaded {df.shape[0]} GAMA objects.")
    return df
def __init__(self, config=None, logger=None, zmin=0.0001, zmax=2, z_samples=500):
    """Set up config/logger and pre-build cosmology interpolators.

    A linearly spaced redshift grid of `z_samples` points on
    [zmin, zmax] is passed to `_make_cosmo_interps` to build the
    interpolators used by this object.
    """
    if logger is None:
        logger = get_logger()
    self.logger = logger
    if config is None:
        config = get_config()
    self.config = config
    self._cosmo = self.config["cosmology"]
    self._zmin = zmin
    self._zmax = zmax
    redshift_grid = np.linspace(self._zmin, self._zmax, z_samples)
    self._cosmo_interps = self._make_cosmo_interps(redshift_grid)
def load_recovery_efficiency(config=None, logger=None):
    """Load the pickled recovery-efficiency interpolator.

    Returns `recovery_efficiency` with its `interp` argument pre-bound
    via functools.partial. The pickle file is project-generated data
    (not untrusted input).
    """
    config = get_config() if config is None else config
    logger = get_logger() if logger is None else logger
    injections_dir = os.path.join(
        config["directories"]["data"], "input", "injections")
    receff_filename = config["injections"]["receff_filename"]
    logger.debug(f"Loading interpolated recover efficiency: (unknown).")
    pickle_path = os.path.join(injections_dir, receff_filename)
    with open(pickle_path, "rb") as f:
        interp = pickle.load(f)
    return partial(recovery_efficiency, interp=interp)
def load_leisman_udgs(config=None, **kwargs):
    """Load the Leisman+17 UDG catalogue and derive photometric columns.

    Builds rest-frame/apparent magnitudes from the catalogue colours,
    converts physical sizes to observed angular sizes, and estimates a
    stellar mass using an empirical mass-to-light ratio from colour.
    Extra **kwargs are forwarded to EmpiricalSBCalculator.
    """
    # Local import, presumably to avoid a circular dependency — TODO confirm.
    from udgsizes.utils.mstar import EmpiricalSBCalculator
    if config is None:
        config = get_config()
    cosmo = config["cosmology"]
    filename = os.path.join(config['directories']['data'], 'input', 'leisman_17.csv')
    df = pd.read_csv(filename)
    # Recession velocity cz [km/s] -> redshift (non-relativistic approximation).
    df["redshift"] = df["cz"] / constants.c.to_value("km / s")
    distmod = cosmo.distmod(df["redshift"]).to_value("mag")
    # Absolute magnitudes: r-band derived from g-band and the g-r colour.
    df["absmag_g"] = df["gMAG"]
    df["absmag_r"] = df["absmag_g"] - df["g-r"]
    # Apparent magnitudes via the distance modulus.
    df["mag_g"] = df["absmag_g"] + distmod
    df["mag_r"] = df["mag_g"] - df["g-r"]
    df["gr"] = df["g-r"]
    # Physical size [assumed kpc — TODO confirm units of 'rh'] and its
    # observed angular counterpart.
    df["re_phys"] = df["rh"].values
    df["re_obs"] = kpc_to_arcsec(df["re_phys"].values,
                                 redshift=df["redshift"].values,
                                 cosmo=cosmo)
    # Estimate ML from colour
    sb = EmpiricalSBCalculator(config=config, **kwargs)
    # Evaluate the ML relation at the lowest stellar mass bin for all rows.
    logmstar_temp = sb._logmstar.min() * np.ones(df.shape[0])  # Lowest stellar mass bin
    logml = np.array([
        sb.calculate_logml_ab(a, colour_rest=b)
        for a, b in zip(logmstar_temp, df["gr"].values)
    ])
    df["logml_ab"] = logml
    # Use ML estimate to calculate logmstar
    logmstar = logml - 0.4 * df["absmag_r"]  # Check this!
    df["logmstar"] = logmstar
    return df
def load_sample(config=None, logger=None, select=True):
    """Load the public LSBG catalogue.

    When `select` is True, rows are filtered by `select_samples` on the
    mean effective surface brightness and circularised radius, plus a
    maximum g-r colour cut (GR_MAX).
    """
    config = get_config() if config is None else config
    logger = get_logger() if logger is None else logger
    catalogue_path = os.path.join(
        config["directories"]["data"], "input", "lsbgs_public.csv")
    df = pd.read_csv(catalogue_path)
    if select:
        keep = select_samples(uae=df["mueff_av"].values,
                              rec=df["rec_arcsec"].values)
        keep &= df["g_r"] < GR_MAX
        # Disabled selection kept for reference:
        # keep &= df["is_red"].values == 0
        df = df[keep].reset_index(drop=True)
    logger.debug(f"Loaded {df.shape[0]} LSBGs from file.")
    return df
# Tail of a corner-plot script: ax0 is the joint panel, ax1/ax2 the
# marginal panels; m1/s1b and the *KWARGS constants are defined earlier
# in the script (outside this view).
ax0.set_xlim(xlim)
ax0.set_ylim(ylim)
# Draw vertical confidence lines at m1 +/- s1b on the top marginal,
# restoring its y-range afterwards so the lines span the full panel.
ylim = ax1.get_ylim()
ax1.plot([m1 - s1b, m1 - s1b], ylim, **CONFLINEKWARGSb)
ax1.plot([m1 + s1b, m1 + s1b], ylim, **CONFLINEKWARGSb)
ax1.set_ylim(ylim)
# Plot best fit model
# ax0.plot(metrics[xkey], metrics[ykey], "ro", markersize=5, label="Best fit")
# Turn off tick labels on marginals
plt.setp(ax1.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
# Consistent axis ranges
ax1.set_xlim(ax0.get_xlim())
ax2.set_ylim(ax0.get_ylim())
bbox_to_anchor = (1.005, 0.9)
ax1.legend(fontsize=FONTSIZE - 2.5, bbox_to_anchor=bbox_to_anchor)
if SAVEFIG:
    config = get_config()
    image_dir = os.path.join(config["directories"]["data"], "images")
    image_filename = os.path.join(image_dir, f"corner_plot_{model_name}.png")
    plt.savefig(image_filename, dpi=150, bbox_inches="tight")
# Non-blocking show so the script can finish while the window stays open.
plt.show(block=False)
def get_classifier_filename(config=None):
    """Return the path of the pickled index-colour classifier."""
    cfg = get_config() if config is None else config
    return os.path.join(cfg["directories"]["data"], "index_colour.pkl")
"""Cross-match the unselected LSBG sample against GAMA spectroscopic
objects and write the matches to a CSV in the data input directory."""
import os

from udgsizes.core import get_config, get_logger
from udgsizes.obs.sample import load_sample, load_gama_specobj
from udgsizes.utils import xmatch

if __name__ == "__main__":
    logger = get_logger()
    # Match radius: 3 arcsec expressed in degrees.
    radius = 3. / 3600
    # Use the full (unselected) LSBG sample for matching.
    df = load_sample(select=False)
    dfg = load_gama_specobj()
    dfm = xmatch.match_dataframe(df, dfg, radius=radius)
    logger.info(f"Matched {dfm.shape[0]} sources.")
    datadir = get_config()["directories"]["data"]
    dfm.to_csv(os.path.join(datadir, "input", "lsbgs_gama_xmatch.csv"))
def smf_plot(pbest, prange=None, which="schechter_baldry", pref=(-1.45,),
             range=(4, 12), logy=True, nsamples=100, ax=None, show=True,
             config=None, pfixed_ref=(0.00071, 10.72), pfixed=None, fitxmax=15,
             linewidth=1.5, color="b", plot_ref=False, **kwargs):
    """Plot a stellar mass function (SMF) model over a logmstar grid.

    Fix: the mutable list defaults (`pref`, `pfixed_ref`) are now tuples.
    They are only ever unpacked with `*`, so behaviour is unchanged, but
    immutable defaults cannot be accidentally shared/mutated across calls.

    Args:
        pbest: Best-fit free parameters, unpacked into the SMF function.
        prange: Optional iterable of parameter samples; if given, the
            min/max envelope over the samples is drawn as a shaded band.
        which: Name of the SMF component module under
            udgsizes.model.components.mstar.
        pref: Reference free parameters, used when `plot_ref` is True.
        range: (xmin, xmax) limits of the logmstar grid. NOTE: shadows the
            builtin `range`; the name is kept for backward compatibility.
        logy: Use a log-scaled y axis.
        nsamples: Number of grid points.
        ax: Existing axes to draw on; a new figure is created if None.
        show: Call plt.show(block=False) before returning.
        config: Project config; loaded via get_config() if None.
        pfixed_ref: Fixed reference parameters; also the default `pfixed`.
        pfixed: Fixed parameters for the best-fit curve (defaults to
            `pfixed_ref`).
        fitxmax: Only grid points below this are drawn for the fit/band.
        plot_ref: Also draw the reference curve in black.
        **kwargs: Forwarded to the best-fit `ax.plot` call.

    Returns:
        The matplotlib axes the curves were drawn on.
    """
    if config is None:
        config = get_config()
    cosmo = config["cosmology"]
    if pfixed is None:
        pfixed = pfixed_ref
    if ax is None:
        fig, ax = plt.subplots()
    func = load_module(f"udgsizes.model.components.mstar.{which}")
    # NOTE(review): functools.partial does not validate keywords, so this
    # try/except cannot fail here; a function that rejects `min` would only
    # raise later, at call time. Kept as-is to preserve behaviour.
    try:
        func = partial(func, min=0)
    except Exception:
        pass
    xx = np.linspace(range[0], range[1], nsamples)
    is_fit = xx < fitxmax
    if plot_ref:
        yyref = [func(_, *pref, *pfixed_ref, cosmo=cosmo) for _ in xx]
        ax.plot(xx, yyref, 'k-', linewidth=linewidth)
    yy = [func(_, *pbest, *pfixed, cosmo=cosmo) for _ in xx[is_fit]]
    ax.plot(xx[is_fit], yy, '--', linewidth=linewidth, color=color, **kwargs)
    if prange is not None:
        # Envelope (pointwise min/max) over the sampled parameter sets.
        mins = np.ones(is_fit.sum()) * np.inf
        maxs = -mins.copy()
        for ps in prange:
            ys = [func(_, *ps, *pfixed, cosmo=cosmo) for _ in xx[is_fit]]
            mins[:] = np.minimum(mins, ys)
            maxs[:] = np.maximum(maxs, ys)
        ax.fill_between(x=xx[is_fit], y1=mins, y2=maxs, alpha=0.2, color=color,
                        linewidth=linewidth)
    if logy:
        ax.set_yscale("log")
    if show:
        plt.show(block=False)
    return ax
def load_gama_masses(config=None, logmstar_min=6, logmstar_max=13, z_max=0.1,
                     gi_max=None, ur_max=None, gr_max=None, lambdar=True,
                     n_max=2.5):
    """Load the GAMA stellar-mass catalogue and apply aperture/h corrections.

    Catalogue schema: http://www.gama-survey.org/dr3/schema/dmu.php?id=9

    Stellar masses and absolute magnitudes are corrected to total
    quantities with the catalogue `fluxscale`, and rescaled from the
    catalogue's h=0.7 to the configured cosmology's h. Rows are then
    filtered by stellar mass, redshift and the optional colour /
    Sersic-index cuts.
    """
    if config is None:
        config = get_config()
    # Load catalogue from file (the "_lambdar" variant uses LAMBDAR photometry).
    lstr = "_lambdar" if lambdar else ""
    filename = os.path.join(config['directories']['data'], 'input',
                            f'gama_masses{lstr}.csv')
    dfg = pd.read_csv(filename)
    fluxscale = dfg["fluxscale"].values
    h = config["cosmology"].h

    # Translate columns
    df = pd.DataFrame()
    df["redshift"] = dfg["Z"]
    df["logmoverl_i"] = dfg["logmoverl_i"]

    # log10 M*,total = logmstar + log10(fluxscale) - 2 log10(h/0.7)
    df["logmstar"] = dfg["logmstar"] + np.log10(fluxscale) - 2 * np.log10(
        h / 0.7)

    # M_X,total = absmag_X - 2.5 log10(fluxscale) + 5 log10(h/0.7)
    df["absmag_r"] = dfg["absmag_r"] - 2.5 * np.log10(
        fluxscale) + 5 * np.log10(h / 0.7)
    df["absmag_i"] = dfg["absmag_i"] - 2.5 * np.log10(
        fluxscale) + 5 * np.log10(h / 0.7)

    df["gi"] = dfg["gminusi"].values
    df["ur"] = dfg["uminusr"].values
    # g-r colour reconstructed from g-i and the corrected absolute magnitudes.
    df["gr"] = dfg["gminusi"] + df["absmag_i"] - df["absmag_r"]

    df["logmstar_absmag_r"] = df["logmstar"] / df["absmag_r"]
    df["logmstar_absmag_i"] = df["logmstar"] / df["absmag_i"]

    # Structural columns (Sersic fits) — presumably absent in some catalogue
    # variants, hence the KeyError suppression; verify against the input files.
    with suppress(KeyError):
        df["n"] = dfg["GALINDEX_r"]
        # Circularised effective radius: rec = sqrt(axis ratio) * re.
        q = 1 - dfg["GALELLIP_r"].values
        re = dfg["GALRE_r"].values
        rec = np.sqrt(q) * re
        df["mag_obs"] = dfg["GALMAG_r"].values
        df["rec_obs"] = rec
        df["uae_obs"] = sersic.mag2meanSB(mag=df["mag_obs"], re=rec, q=1)
        df["rec_phys"] = arcsec_to_kpc(rec, redshift=df["redshift"].values,
                                       cosmo=config["cosmology"])
        df["kcorr_g"] = dfg["KCORR_G"]
        df["kcorr_r"] = dfg["KCORR_R"]
        df["kcorr_i"] = dfg["KCORR_I"]

    # Apply selections
    logmstar = df["logmstar"].values
    cond = (logmstar >= logmstar_min) & (logmstar < logmstar_max)
    cond &= (df["redshift"].values < z_max)
    if gi_max is not None:
        cond &= (df["gi"].values < gi_max)
    if ur_max is not None:
        cond &= (df["ur"].values < ur_max)
    if gr_max is not None:
        cond &= (df["gr"].values < gr_max)
    if n_max is not None:
        # "n" may be missing if the structural block above was skipped.
        with suppress(KeyError):
            cond &= (df["n"].values < n_max)
    df = df[cond].reset_index(drop=True)
    return df
""" Calculate metrics using model samples generated from best-fit model in place of real observations. """ import argparse import os from udgsizes.core import get_config from udgsizes.fitting.grid import ParameterGrid CONFIG = get_config() METRICS_IGNORE = ["kstest_2d"] NITERS = 100 NPROC = 1 if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("model_name", type=str, help="The model name.") parsed_args = parser.parse_args() model_name = parsed_args.model_name # Load best sample grid = ParameterGrid(model_name) # Make directory for output directory = os.path.join(grid.directory, "faux") os.makedirs(directory, exist_ok=True) # Calculate metrics for i in range(NITERS):