def test_read_salt2_cov():
    fname = join(dirname(__file__), "data", "lc-03D4ag.list")
    data = sncosmo.read_lc(fname, format="salt2", read_covmat=True)
    assert data["Fluxcov"].shape == (len(data), len(data))
    assert_allclose(data["Fluxcov"][0:3, 0:3],
                    [[0.867712297284, 0.01139998771, 0.01119398747],
                     [0.01139998771, 2.03512047975, 0.01190299234],
                     [0.01119398747, 0.01190299234, 1.3663344852]])
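# NOTE: a minimal orientation sketch (not from any one project in this listing);
# the file paths are placeholders. It shows the two read_lc() call forms that the
# tests here exercise: a plain ascii table, and a SALT2-format file whose flux
# covariance matrix is loaded into the "Fluxcov" column via read_covmat=True.
import sncosmo

data = sncosmo.read_lc("lc.dat", format="ascii")  # hypothetical ascii light curve

salt2_data = sncosmo.read_lc("lc-03D4ag.list", format="salt2", read_covmat=True)
print(salt2_data.meta["Z_HELIO"], salt2_data["Fluxcov"].shape)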
def plot_testdata(nsne, under_model):
    if not os.path.exists("lcplots_%s" % (under_model.name(nsne))):
        os.mkdir("lcplots_%s" % (under_model.name(nsne)))
    fnames = glob("testdata_%s/*" % (under_model.name(nsne)))
    for fname in fnames:
        plotname = fname.replace(
            "testdata_%s" % (under_model.name(nsne)),
            "lcplots_%s" % (under_model.name(nsne))).replace("dat", "png")
        data = sncosmo.read_lc(fname)
        sncosmo.plot_lc(data, fname=plotname)
def buildzdist(dirname):
    lclist = glob.glob(dirname + "/*")
    z = []
    for lcfile in lclist:
        lc = sncosmo.read_lc(lcfile)
        zval = lc.meta["z"]
        z.append(zval)
    return z
def test_read_salt2_old():
    dname = join(dirname(__file__), "data", "SNLS3-04D3gx")
    data = sncosmo.read_lc(dname, format="salt2-old")

    # Test length and column names:
    assert len(data) == 25 + 37 + 38 + 18  # g + r + i + z lengths
    assert data.colnames == ["Date", "Flux", "Fluxerr", "ZP", "Filter", "MagSys"]

    # Test a bit of metadata and data
    assert data.meta["NAME"] == "04D3gx"
    assert_allclose(data.meta["Redshift"], 0.91)
    assert_allclose(data.meta["RA"], 215.056948)
    assert np.all(data["MagSys"] == "VEGA")
def test_read_salt2():
    fname = join(dirname(__file__), "data", "salt2_example.dat")
    data = sncosmo.read_lc(fname, format="salt2")

    # Test a few columns
    assert_allclose(data["Date"], [52816.54, 52824.59, 52795.59, 52796.59])
    assert_allclose(data["ZP"], [27.091335, 27.091335, 25.913054, 25.913054])
    assert np.all(data["Filter"] == np.array(["MEGACAM::g", "MEGACAM::g",
                                              "MEGACAM::i", "MEGACAM::i"]))
    assert np.all(data["MagSys"] == "VEGA")

    # Test a bit of metadata
    assert_allclose(data.meta["Z_HELIO"], 0.285)
    assert_allclose(data.meta["RA"], 333.690959)
    assert data.meta["z_source"] == "H"
def test_read_salt2():
    fname = join(dirname(__file__), "data", "lc-03D4ag.list")
    data = sncosmo.read_lc(fname, format="salt2")

    # Test a few columns
    assert_allclose(data["Date"][0:4], [52816.54, 52824.59, 52851.53, 52873.4])
    assert_allclose(data["ZP"][0:4], 27.036167)
    assert np.all(data["Filter"][0:4] == "MEGACAMPSF::g")
    assert np.all(data["MagSys"] == "AB_B12")

    # Test a bit of metadata
    assert_allclose(data.meta["Z_HELIO"], 0.285)
    assert_allclose(data.meta["RA"], 333.690959)
    assert data.meta["z_source"] == "H"
def test_roundtripping():
    for format in ['json', 'ascii', 'salt2']:
        f = NamedTemporaryFile(delete=False)
        f.close()  # close to ensure that we can open it in write_lc()

        # raw=True is for the benefit of salt2 writer that modifies column
        # and header names by default.
        sncosmo.write_lc(lcdata, f.name, format=format, raw=True,
                         pedantic=False)
        data = sncosmo.read_lc(f.name, format=format)

        for key in lcdata.colnames:
            assert np.all(data[key] == lcdata[key])
        for key in lcdata.meta:
            assert data.meta[key] == lcdata.meta[key]

        os.unlink(f.name)
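# NOTE: `lcdata` above is a module-level fixture that is not shown in this
# excerpt. A hypothetical stand-in with the usual sncosmo light-curve columns
# (time/band/flux/fluxerr/zp/zpsys) and a little metadata might look like this:
from astropy.table import Table

lcdata = Table({'time': [50000.0, 50000.1],
                'band': ['sdssg', 'sdssr'],
                'flux': [1.0, 2.0],
                'fluxerr': [0.1, 0.1],
                'zp': [25.0, 25.0],
                'zpsys': ['ab', 'ab']},
               meta={'id': 1, 'RA': 36.0})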
def fromSALTFormat(cls, fname):
    _lc = sncosmo.read_lc(fname, format='salt2')
    lc = _lc.to_pandas()
    lc.MagSys = 'ab'

    def filtername(x):
        if 'megacam' in x.lower():
            return 'megacam'
        else:
            return x[:-3].lower()

    banddict = dict((key.lower(), filtername(key) + key[-1])
                    for key in lc.Filter.unique())
    return cls(lc, bandNameDict=banddict, ignore_case=True, propDict=_lc.meta)
from io import StringIO


def test_read_lc():
    f = StringIO("""
@id 1
@RA 36.0
@description good
time band flux fluxerr zp zpsys
50000. g 1. 0.1 25. ab
50000.1 r 2. 0.1 25. ab
""")
    t = sncosmo.read_lc(f, format='ascii')
    assert str(t) == (" time band flux fluxerr zp zpsys\n"
                      "------- ---- ---- ------- ---- -----\n"
                      "50000.0 g 1.0 0.1 25.0 ab\n"
                      "50000.1 r 2.0 0.1 25.0 ab")
    assert t.meta['id'] == 1
    assert t.meta['RA'] == 36.0
    assert t.meta['description'] == 'good'
def importance_sampling(nsne, under_model):
    # sampler parameters
    ndim = under_model.dim
    nwalkers = 20
    nburn = 200
    nsamples = 500

    # Read all SN redshifts and previously-generated parameter samples
    snsamples = []
    for fname in sorted(glob("testdata_%s/*" % (under_model.name(nsne)))):
        z = sncosmo.read_lc(fname).meta['z']  # load whole file just to get 'z'
        sfname = fname.replace(
            "testdata_%s" % (under_model.name(nsne)),
            "samples_%s" % (under_model.name(nsne))).replace(".dat", ".npy")
        samples = np.load(sfname)
        snsamples.append((z, samples))

    # Create sampler
    sampler = emcee.EnsembleSampler(nwalkers, ndim, under_model.lnlike,
                                    args=(snsamples,))

    # Starting positions
    errors = under_model.initial * 0.01
    pos = np.array([under_model.initial + errors * np.random.randn(ndim)
                    for i in range(nwalkers)])

    # burn-in
    pos, prob, state = sampler.run_mcmc(pos, nburn)
    print("Burn in done")

    # production run
    sampler.reset()
    sampler.run_mcmc(pos, nsamples)
    print("Avg acceptance fraction:", np.mean(sampler.acceptance_fraction))

    results = sampler.flatchain
    np.save("samples_%s/globalsamples_%s.npy"
            % (under_model.name(nsne), under_model.name(nsne)), results)
def test_fit_lc_vs_snfit():
    """Test fit_lc versus snfit result for one SN."""

    # purposefully use CCM dust to match snfit
    model = sncosmo.Model(source='salt2',
                          effects=[sncosmo.CCM89Dust()],
                          effect_names=['mw'],
                          effect_frames=['obs'])

    fname = join(dirname(__file__), "data", "lc-03D4ag.list")
    data = sncosmo.read_lc(fname, format='salt2', read_covmat=True,
                           expand_bands=True)
    model.set(mwebv=data.meta['MWEBV'], z=data.meta['Z_HELIO'])
    result, fitted_model = sncosmo.fit_lc(
        data, model, ['t0', 'x0', 'x1', 'c'],
        bounds={'x1': (-3., 3.), 'c': (-0.4, 0.4)},
        modelcov=True,
        phase_range=(-15., 45.),
        wave_range=(3000., 7000.),
        warn=False, verbose=False)
    print(result)

    assert result.ndof == 25
    assert result.nfit == 3
    assert_allclose(fitted_model['t0'], 52830.9313, atol=0.01, rtol=0.)
    assert_allclose(fitted_model['x0'], 5.6578663e-05, atol=0., rtol=0.005)
    assert_allclose(fitted_model['x1'], 0.937399344, atol=0.005, rtol=0.)
    assert_allclose(fitted_model['c'], -0.0851965244, atol=0.001, rtol=0.)

    # errors
    assert_allclose(result.errors['t0'], 0.0955792638, atol=0., rtol=0.01)
    assert_allclose(result.errors['x0'], 1.52745001e-06, atol=0., rtol=0.01)
    assert_allclose(result.errors['x1'], 0.104657847, atol=0., rtol=0.01)
    assert_allclose(result.errors['c'], 0.0234763446, atol=0., rtol=0.01)
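# NOTE: a short follow-up sketch (not part of the test above) for inspecting the
# fit result; the output filename is a placeholder.
print(result.errors)
fig = sncosmo.plot_lc(data, model=fitted_model)
fig.savefig("lc-03D4ag_fit.png")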
import matplotlib
matplotlib.use('Agg')

from copy import copy

import sncosmo
import numpy as np

from bolomc import bump
from bolomc import burns
from bolomc.distributions import TruncNorm

lc = sncosmo.read_lc('../data/CSP_Photometry_DR2/SN2005elopt+nir_photo.dat',
                     format='csp')

model = sncosmo.Model(bump.BumpSource(),
                      effect_names=['host', 'mw'],
                      effect_frames=['rest', 'obs'],
                      effects=[sncosmo.OD94Dust(), sncosmo.F99Dust()])

model2 = copy(model)
model2.set(UV_bump_amp=1.,  # blue_bump_amp=0.2,
           blue_bump_amp=-0.2,
           i1_bump_amp=0.1,
           i2_bump_amp=-0.2,
           y1_bump_amp=-0.2,
           y2_bump_amp=0.2,
           y3_bump_amp=-0.1,
           j1_bump_amp=-0.2,
           j2_bump_amp=0.2,
           h1_bump_amp=-0.2,
           h2_bump_amp=0.2,
           k1_bump_amp=-0.2,
           k2_bump_amp=0.2)
import sncosmo
import os
import numpy as np
import corner
import copy
import filters
from scipy.stats import chi2
from scipy.integrate import quad

phot_d = None

###########################################################################
# DATA
sn = '16geu'

# ground data
fname = '%s_clean.dat' % (sn)  # Cleaned light curve
##############################################################################
phot_d = sncosmo.read_lc(fname)  # read the ground data
phot_d['imageid'] = 0            # ground data have imageid = 0!
errfloor = 0.0
##############################################################################
# Here the error bars of the ground data can be adjusted.
lenserr = 0.00  # set to 0.05 when using gravlens model
#phot_d['fluxerr'] = np.sqrt(phot_d['fluxerr']**2+(phot_d['flux']*lenserr)**2)
phot_d['fluxerr'] = np.sqrt((phot_d['flux'] * errfloor)**2 +
                            (phot_d['fluxerr'])**2)  # scale error bars
errfloor = 0.08
##############################################################################
# Set up the data of the HST to be read in
sumdata = False
lcfname = {  # the names of the data files
    'uvf625w': 'lc_uvf625w_resolved.csv',
def task(filename, i, j, nrv, nebv, kind='mcmc'):
    lc = sncosmo.read_lc(filename, format='csp')

    model = sncosmo.Model(bump.BumpSource(),
                          effect_names=['host', 'mw'],
                          effect_frames=['rest', 'obs'],
                          effects=[sncosmo.OD94Dust(), sncosmo.F99Dust()])

    rv_prior = burns.get_hostrv_prior(lc.meta['name'], 'gmm', sncosmo.OD94Dust)
    host_ebv, err = burns.get_hostebv(lc.meta['name'])
    ebv_prior = TruncNorm(-np.inf, np.inf, host_ebv, err)

    rv_prior, low, high = burns.get_hostrv_prior(lc.meta['name'], 'gmm',
                                                 sncosmo.OD94Dust, retlims=True)
    host_ebv, err = burns.get_hostebv(lc.meta['name'])

    rv = np.linspace(low if low >= 0 else 0, high, nrv)[i]
    ebvlo = host_ebv - err
    ebvhi = host_ebv + err
    ebv = np.linspace(ebvlo if ebvlo >= 0 else 0, ebvhi, nebv)[j]

    model.set(z=lc.meta['zcmb'])
    model.set(mwebv=burns.get_mwebv(lc.meta['name'])[0])
    model.set(hostebv=ebv)
    model.set(hostr_v=rv)
    model.set(t0=burns.get_t0(lc.meta['name']))

    vparams = list(filter(lambda x: 'bump' in x, model._param_names))
    vparams += ['t0', 's']
    bounds = {b.name + "_bump_amp": (-1, 2) for b in model.source.bumps}
    #bounds['hostr_v'] = (rv_prior.mean - 0.5, rv_prior.mean + 0.5)
    #bounds['hostebv'] = (0, 0.2)
    bounds['s'] = (0, 3.)

    res, model = sncosmo.fit_lc(lc, model, ['amplitude'] + vparams,
                                bounds=bounds)

    bounds['t0'] = (model.get('t0') - 2, model.get('t0') + 2)

    vparams.append('amplitude')
    bounds['amplitude'] = (0.5 * model.get('amplitude'),
                           2 * model.get('amplitude'))

    qualifier = '_ebv_%.2f_rv_%.2f' % (ebv, rv)

    if kind != 'fit':
        if kind == 'mcmc':
            result = sncosmo.mcmc_lc(lc, model, vparams, bounds=bounds,
                                     nwalkers=500, nburn=1000, nsamples=20)
        elif kind == 'nest':
            result = sncosmo.nest_lc(lc, model, vparams, bounds=bounds,
                                     method='multi', npoints=800)

        samples = result[0].samples.reshape(500, 20, -1)
        vparams = result[0].vparam_names

        plot_arg = np.rollaxis(samples, 2)
        plotting.plot_chains(plot_arg, param_names=vparams,
                             filename='fits/%s_samples%s.pdf'
                             % (lc.meta['name'], qualifier))

        dicts = [dict(zip(vparams, samp))
                 for samp in samples.reshape(500 * 20, -1)]
        thinned = samples.reshape(500, 20, -1)[:, [0, -1]].reshape(1000, -1)

        pickle.dump(samples,
                    open('fits/%s_samples%s.pkl'
                         % (lc.meta['name'], qualifier), 'wb'))

        models = [copy(result[1]) for i in range(len(thinned))]
        for d, m in zip(dicts, models):
            m.set(**d)

        fig = sncosmo.plot_lc(data=lc, model=models,
                              ci=(50 - 68 / 2., 50., 50 + 68 / 2.),
                              model_label=lc.meta['name'])
        fig.savefig('fits/%s%s.pdf' % (lc.meta['name'], qualifier))
    else:
        fitres, model = sncosmo.fit_lc(lc, model, vparams, bounds=bounds)
        fig = sncosmo.plot_lc(data=lc, model=model)
        fig.savefig('fits/%s_fit%s.pdf' % (lc.meta['name'], qualifier))
def nonvparams(snname):
    lc = sncosmo.read_lc('../data/CSP_Photometry_DR2/%sopt+nir_photo.dat'
                         % snname, format='csp')
    z = lc.meta['zcmb']
    mwebv, _ = burns.get_mwebv(snname)
    return {'z': z, 'mwebv': mwebv}
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64)

outdata = astropy.table.Table(masked=True, names=names, dtype=dtype)

for i, lc in enumerate(lcfile):
    #################################################################################
    if args.jla:
        data = sncosmo.read_lc(lc, format='salt2')
        nickname = data.meta['SN']
    if args.cadencesim:
        data = sncosmo.read_lc(lc, format='ascii')
        nickname = lc.split('.')[0].split('/')[-1]
    #################################################################################
    try:
        nickname = str(int(nickname))
    except:
        pass
    #################################################################################
    if args.jla:
        fitfile = './fit_results/emcee/JLA/%s/%s.dat' % (nickname, nickname)
    if args.cadencesim:
        fitfile = './fit_results/emcee/cadencesim/%s/%s.dat' % (nickname,
        return (_snFit(args))
    except:
        return (None)


files = glob.glob('*clipped.dat')
zpMag = sncosmo.get_magsystem('Vega')
sne = []
times = []
#plt.figure()
for f in files:
    if f not in ['lc_2009kn_clipped.dat', 'lc_2010bq_clipped.dat']:
        continue
    print(f)
    t0 = 0
    lc = sncosmo.read_lc(f)
    f = f[:-8]
    if (len(lc[lc['Band'] == 'U']) == 0 and len(lc[lc['Band'] == 'u']) == 0
            and len(lc[lc['Band'] == 'J']) == 0):
        #os.remove(f)
        continue
    lc = lc[lc['Band'] != 'U']
    lc = lc[lc['Band'] != 'u']
    lc = lc[lc['Band'] != 'H']
    lc = lc[lc['Band'] != 'J']
    lc = lc[lc['Band'] != 'K']
    lc = lc[lc['Band'] != 'i']
    lc = lc[lc['Band'] != 'I']
    if not lc:
        #os.remove(f)
    figname = (fname.replace("testdata", "lcplots")
               .replace(".dat", "_lcfit.png"))
    model.parameters[1:5] = showparams
    sncosmo.plot_lc(data, model, fname=figname)

    labels = ["${0}$".format(s) for s in model.param_names_latex[1:5]]
    fig = triangle.corner(samples, labels=labels, bins=30)
    figname = (fname.replace("testdata", "lcplots")
               .replace(".dat", "_corner.png"))
    plt.savefig(figname)
"""

# Get data from all SNe
data_list = []
for fname in fnames[:n_obs]:
    data = sncosmo.read_lc(fname)
    data_list.append(data)

# Do MCMC:
all_samples = sample_all(data_list)
np.savetxt('emcee_samples.dat', all_samples)
all_params = np.average(all_samples, axis=0)

# Make plots of best fit global parameters and histograms of best fit
# parameters for each supernova:
labels = ['Omega', 'x0_0', 'alpha', 'beta']
fig = triangle.corner(all_samples[:, :4], labels=labels, bins=30)
plt.savefig('global_params_%ssne.png' % n_obs)

fig2, axes = plt.subplots(2, 2)
titles = ['t0', 'sigma', 'x1', 'c']
rank = comm.Get_rank()
size = comm.Get_size()

# partition an iterable i into n parts
_split = lambda i, n: [i[:len(i)/n]] + _split(i[len(i)/n:], n-1) if n != 0 else []

# Shorthand for a few variables with long names.
od94 = sncosmo.OD94Dust
f99 = sncosmo.F99Dust

# Load the run configuration file.
config_filename = sys.argv[1]
config = yaml.load(open(config_filename).read())

# Read the LC to fit.
lc = sncosmo.read_lc(config['lc_filename'], format='csp')
name = lc.meta['name']

# Configure the properties of the host galaxy dust.
dust_type = od94 if config['dust_type'] == 'od94' else f99
bintype = config['burns_bintype']

# Get the host galaxy dust data.
host_ebv, err = burns.get_hostebv(name)
_, rv_low, rv_hi = burns.get_hostrv_prior(name, bintype, dust_type,
                                          retlims=True)
ebvlo = host_ebv - err
ebvhi = host_ebv + err

# Do ebv / r_v gridding.
nrv = config['nrv']
def load_example_lcs():
    return ([_standardize(sncosmo.read_lc(f))
             for f in glob.glob(
                 os.path.join(__dir__, 'example_setup', 'lcs', '*.lc'))])
from astropy.table import Table
import numpy as np
import glob
from IPython import embed
import sys

import sncosmo

sne = glob.glob('./lc/*.list')

names = ['SN', 'tmax', 'color', 'x1r', 'x1f', 'mB', 'mwebv']
dtype = ['S50', np.float64, np.float64, np.float64, np.float64, np.float64,
         np.float64]
t = Table(names=names, dtype=dtype)

for i, sn in enumerate(sne):
    data = sncosmo.read_lc(sn, format='salt2')
    daymax = data.meta['DAYMAX']
    x1r = data.meta['X1R']
    x1f = data.meta['X1F']
    c = data.meta['C']
    mB = data.meta['MB']
    name = sn.split('.')[-2].split('/')[-1]
    mwebv = data.meta['MWEBV']
    t.add_row([name, daymax, c, x1r, x1f, mB, mwebv])

t.write('./cadence_sim_inputs.txt', format='ascii.fixed_width',
        delimiter=' ', overwrite=True)
def sample_lcs(nsne, under_model):
    if not os.path.exists("samples_%s" % (under_model.name(nsne))):
        os.mkdir("samples_%s" % (under_model.name(nsne)))

    # sampler parameters
    ndim = 4
    nwalkers = 20
    nburn = 100
    nsamples = 500
    thin = 10

    # define likelihood
    model = sncosmo.Model(source='salt2-extended')
    bounds = {0: (-20., 40.),  # t0
              2: (-4., 4.),    # x1
              3: (-0.4, 0.4)}

    def lnlike(parameters, data):
        """Return log L for array of parameters."""

        # If any parameters are out-of-bounds, return 0 probability.
        for i, b in bounds.items():
            if not b[0] < parameters[i] < b[1]:
                return -np.inf

        model.parameters[1:5] = parameters  # set model t0, x0, x1, c
        mflux = model.bandflux(data['band'], data['time'],
                               zp=data['zp'], zpsys=data['zpsys'])
        chisq = np.sum(((data['flux'] - mflux) / data['fluxerr'])**2)
        return -chisq / 2.

    def sample(data):
        """Return MCMC samples for model defined above."""

        # fix redshift in the model
        model.set(z=data.meta['z'])

        # Create sampler
        sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike, args=(data,),
                                        a=3.50)

        # Starting positions of walkers.
        # Here we cheat by setting the "guess" equal to the known true parameters!
        current = np.array([data.meta['t0'], data.meta['x0'],
                            data.meta['x1'], data.meta['c']])
        errors = np.array([1., 0.1 * data.meta['x0'], 1., 0.1])
        pos = [current + errors * np.random.randn(ndim)
               for i in range(nwalkers)]

        # burn-in
        pos, prob, state = sampler.run_mcmc(pos, nburn)
        sampler.reset()

        # production run
        sampler.run_mcmc(pos, nsamples, thin=thin)
        print("Avg acceptance fraction:", np.mean(sampler.acceptance_fraction))

        return sampler.flatchain

    fnames = sorted(glob("testdata_%s/*" % (under_model.name(nsne))))
    for fname in fnames:
        print("processing", fname)

        # read data, run sampler on it, save samples
        data = sncosmo.read_lc(fname)
        samples = sample(data)
        sfname = (fname.replace("testdata_%s" % (under_model.name(nsne)),
                                "samples_%s" % (under_model.name(nsne)))
                  .replace(".dat", ".npy"))
        np.save(sfname, samples)
print(str(x))
print(str(len(relevantdata[0])))

# Write out to a file as this is how we will do things in a larger set by
# looping through, and then read in the file
model.set(**params[0])
# fig_relevant = sncosmo.plot_lc(relevantdata[0], model=model)
#print("Close Window to continue")
#pl.show()
sncosmo.write_lc(Table(relevantdata[0]), fname='lc.dat', format='ascii')
# sncosmo.write_lc(Table(relevantdata[0]), fname='lc.dat.json', format='json')
# sncosmo.write_lc(Table(relevantdata[0]), fname='lc.dat.fits', format='snana')  # fits

lc = sncosmo.read_lc('lc.dat', format='ascii')
fmodel = sncosmo.Model(source='salt2-extended')
for z in zvals:
    fmodel.set(z=z)
    res, fitmodel = sncosmo.fit_lc(relevantdata[0], fmodel,
                                   ['t0', 'x0', 'x1', 'c'])
    print(res)
    print(params[0])
def _read_data(filename, **kwargs):
    table_done = True
    try:
        table = sncosmo.read_lc(filename, masked=True, **kwargs)
        for col in table.colnames:
            if col != _get_default_prop_name(col.lower()):
                table.rename_column(col, _get_default_prop_name(col.lower()))
    except:
        try:
            table = ascii.read(filename, masked=True, **kwargs)
            for col in table.colnames:
                if col != _get_default_prop_name(col.lower()):
                    table.rename_column(col, _get_default_prop_name(col.lower()))
        except:
            table = Table(masked=True)
            table_done = False

    delim = kwargs.get('delim', None)
    myCurve = curve()
    with anyOpen(filename) as f:
        lines = f.readlines()
        # uses the most common line length as the correct length
        length = mode([len(l.split()) for l in lines])[0][0]
        for i, line in enumerate(lines):
            if np.any([x in line for x in _comment_char]):
                continue
            line = line.strip(delim)
            if len(line) == 0:
                continue
            if np.any([x in line for x in _meta__]):
                pos = line.find(' ')
                # just making sure we're not about to put the col names into meta
                if (lines[i + 1][0] in _meta__ or
                        lines[i + 1][0] in _comment_char or
                        len(line) != length):
                    if (pos == -1 or
                            not any([_isfloat(x) for x in line.split()])):
                        if line[-1] not in string.punctuation:
                            line = line + '.'
                    else:
                        myCurve.meta[line[1:pos]] = _cast_str(line[pos:])
                    continue
            line = line.split()
            if len(line) != length:
                raise RuntimeError("Make sure your data are in a square matrix.")
            if table.colnames:
                colnames = table.colnames
            else:
                colnames = odict.fromkeys(
                    [_get_default_prop_name(x.lower()) for x in line])
                if len(colnames) != len(line):
                    raise RuntimeError("Do you have duplicate column names?")
                colnames = odict(zip(colnames, range(len(colnames))))
            startLine = i
            break
    f.close()

    if not table_done:
        lines = [x.strip().split() for x in lines[startLine + 1:]
                 if not np.any([y in x for y in _comment_char])]
        for col in colnames:
            table[col] = np.asarray([_cast_str(x[colnames[col]]) for x in lines])
        colnames = colnames.keys()

    for col in [_get_default_prop_name(x) for x in ['band', 'zp', 'zpsys']]:
        if col not in colnames:
            temp = kwargs.get(col, None)
            if temp is not None:
                table[col] = temp
            else:
                print('Column "%s" is not in your file and you did not define '
                      'it in kwargs.' % col)
                sys.exit(1)

    table = standardize_table_colnames(table)
    bnds = {x for x in table[_get_default_prop_name('band')]}
    table = _norm_flux_mag(table)
    for band in bnds:
        if _isfloat(band[0]):
            band = 'band_' + band
        try:
            if band[0:5] == 'band_':
                sncosmo.get_bandpass(band[5:])
            else:
                sncosmo.get_bandpass(band)
        except:
            print('Skipping band %s, not in registry.' % band)
            table.mask[table[_get_default_prop_name('band')] == band] = True
            continue
        myCurve.bands.append(band)

    myCurve.table = table
    myCurve.zpsys = table['zpsys'][0]
    return myCurve
def load_example_lc():
    return (_standardize(
        sncosmo.read_lc(
            os.path.join(__dir__, 'example_setup', 'lcs',
                         'example_2006aj.lc'))))
def curveToColor(lc, colors, bandFit=None, snType='II', bandDict=_filters,
                 color_bands=_opticalBands, zpsys='AB', model=None,
                 singleBand=False, verbose=True, **kwargs):
    """
    Function takes a lightcurve file and creates a color table for it.

    :param lc: Name of lightcurve file you want to read, or astropy Table containing data
    :type lc: str or astropy.Table
    :param colors: Colors you want to calculate for the given SN (i.e U-B, r'-J)
    :type colors: str or list of strings
    :param bandFit: If there is a specific band you would like to fit instead of default
    :type bandFit: str,optional
    :param snType: Classification of SN
    :type snType: str,optional
    :param bandDict: sncosmo bandpass for each band used in the fitting/table generation
    :type bandDict: dict,optional
    :param color_bands: bands making up the known component of chosen colors
    :type color_bands: list,optional
    :param zpsys: magnitude system (i.e. AB or Vega)
    :type zpsys: str,optional
    :param model: If there is a specific sncosmo model you would like to fit with,
        otherwise all models matching the SN classification will be tried
    :type model: str,optional
    :param singleBand: If you would like to only fit the bands in the color
    :type singleBand: Boolean,optional
    :param verbose: If you would like printing information to be turned on/off
    :type verbose: Boolean,optional
    :param kwargs: Catches all SNCOSMO fitting parameters here
    :returns: colorTable: Astropy Table object containing color information
    """
    bands = append([col[0] for col in colors], [col[-1] for col in colors])
    for band in _filters:
        if band not in bandDict.keys() and band in bands:
            bandDict[band] = sncosmo.get_bandpass(_filters[band])
    if not isinstance(colors, (tuple, list)):
        colors = [colors]
    zpMag = sncosmo.get_magsystem(zpsys)

    if isinstance(lc, str):
        curve = _standardize(sncosmo.read_lc(lc))
    else:
        try:
            curve = _standardize(lc)
        except:
            raise RuntimeError("Can't understand your lightcurve.")

    if _get_default_prop_name('zpsys') not in curve.colnames:
        curve[_get_default_prop_name('zpsys')] = zpsys
    colorTable = Table(masked=True)
    colorTable.add_column(Column(data=[], name=_get_default_prop_name('time')))

    for band in bandDict:
        if not isinstance(bandDict[band], sncosmo.Bandpass):
            bandDict = _bandCheck(bandDict, band)

    t0 = None
    if verbose:
        print('Getting best fit for: ' + ','.join(colors))

    args = []
    for p in _fittingParams:
        args.append(kwargs.get(p, _fittingParams[p]))
        if p == 'method':
            try:
                importlib.import_module(_fittingPackages[args[-1]])
            except RuntimeError:
                sys.exit()

    for color in colors:  # start looping through desired colors
        if bandDict[color[0]].wave_eff < _UVrightBound:
            # then extrapolating into the UV from optical
            if not bandFit:
                bandFit = color[-1]
            if singleBand:
                color_bands = [color[-1]]
            # curve on the blue side of current color
            blue = curve[curve[_get_default_prop_name('band')] == color[0]]
            # curve on the red side of current color
            red = curve[[x in color_bands
                         for x in curve[_get_default_prop_name('band')]]]
        else:  # must be extrapolating into the IR
            if not bandFit:
                bandFit = color[0]
            if singleBand:
                color_bands = [color[0]]
            blue = curve[[x in color_bands
                          for x in curve[_get_default_prop_name('band')]]]
            red = curve[curve[_get_default_prop_name('band')] == color[-1]]

        if len(blue) == 0 or len(red) == 0:
            if verbose:
                print('Asked for color %s but missing necessary band(s)' % color)
            bandFit = None
            continue

        btemp = [bandDict[blue[_get_default_prop_name('band')][i]].name
                 for i in range(len(blue))]
        rtemp = [bandDict[red[_get_default_prop_name('band')][i]].name
                 for i in range(len(red))]
        blue.remove_column(_get_default_prop_name('band'))
        blue[_get_default_prop_name('band')] = btemp
        red.remove_column(_get_default_prop_name('band'))
        red[_get_default_prop_name('band')] = rtemp

        # now make sure we have zero-points and fluxes for everything
        if _get_default_prop_name('zp') not in blue.colnames:
            blue[_get_default_prop_name('zp')] = [
                zpMag.band_flux_to_mag(1 / sncosmo.constants.HC_ERG_AA,
                                       blue[_get_default_prop_name('band')][i])
                for i in range(len(blue))]
        if _get_default_prop_name('zp') not in red.colnames:
            red[_get_default_prop_name('zp')] = [
                zpMag.band_flux_to_mag(1 / sncosmo.constants.HC_ERG_AA,
                                       red[_get_default_prop_name('band')][i])
                for i in range(len(red))]
        if _get_default_prop_name('flux') not in blue.colnames:
            blue = mag_to_flux(blue, bandDict, zpsys)
        if _get_default_prop_name('flux') not in red.colnames:
            red = mag_to_flux(red, bandDict, zpsys)

        if not t0:  # this just ensures we only run the fitting once
            if not model:
                if verbose:
                    print('No model provided, running series of models.')
                mod, types = loadtxt(os.path.join(__dir__, 'data', 'sncosmo',
                                                  'models.ref'),
                                     dtype='str', unpack=True)
                modDict = {mod[i]: types[i] for i in range(len(mod))}
                if snType != 'Ia':
                    mods = [x for x in sncosmo.models._SOURCES._loaders.keys()
                            if x[0] in modDict.keys() and
                            modDict[x[0]][:len(snType)] == snType]
                elif snType == 'Ia':
                    mods = [x for x in sncosmo.models._SOURCES._loaders.keys()
                            if 'salt2' in x[0]]
                mods = {x[0] if isinstance(x, (tuple, list)) else x for x in mods}

                if bandFit == color[0] or len(blue) > len(red):
                    args[0] = blue
                    if len(blue) > 60:
                        fits = []
                        for mod in mods:
                            fits.append(_snFit([mod] + args))
                    else:
                        fits = pyParz.foreach(mods, _snFit, args)
                    fitted = blue
                    notFitted = red
                    fit = color[0]
                elif bandFit == color[-1] or len(blue) < len(red):
                    args[0] = red
                    '''
                    data,temp= sncosmo_fitting.cut_bands(photometric_data(red),
                                                         sncosmo.Model(tempMod),
                                                         z_bounds=all_bounds.get('z', None),
                                                         warn=True)
                    print(data.fluxerr)
                    cov = diag(data.fluxerr**2) if data.fluxcov is None else data.fluxcov
                    invcov = linalg.pinv(cov)
                    args.append(invcov)
                    sys.exit()
                    fits=pyParz.foreach(mods,_snFit,args)
                    args.pop()
                    '''
                    if len(red) > 60:
                        fits = []
                        for mod in mods:
                            fits.append(_snFit([mod] + args))
                    else:
                        fits = pyParz.foreach(mods, _snFit, args)
                    fitted = red
                    notFitted = blue
                    fit = color[-1]
                else:
                    raise RuntimeError(
                        'Neither band "%s" nor band "%s" has more points, and '
                        'you have not specified which to fit.'
                        % (color[0], color[-1]))

                bestChisq = inf
                for f in fits:
                    if f:
                        res, mod = f
                        if res.chisq < bestChisq:
                            bestChisq = res.chisq
                            bestFit = mod
                            bestRes = res
                if verbose:
                    print('Best fit model is "%s", with a Chi-squared of %f'
                          % (bestFit._source.name, bestChisq))
            elif bandFit == color[0] or len(blue) > len(red):
                args[0] = blue
                bestRes, bestFit = _snFit(append(model, args))
                fitted = blue
                notFitted = red
                fit = color[0]
                if verbose:
                    print('The model you chose (%s) completed with a '
                          'Chi-squared of %f' % (model, bestRes.chisq))
            elif bandFit == color[-1] or len(blue) < len(red):
                args[0] = red
                bestRes, bestFit = _snFit(append(model, args))
                fitted = red
                notFitted = blue
                fit = color[-1]
                if verbose:
                    print('The model you chose (%s) completed with a '
                          'Chi-squared of %f' % (model, bestRes.chisq))
            else:
                raise RuntimeError(
                    'Neither band "%s" nor band "%s" has more points, and '
                    'you have not specified which to fit.'
                    % (color[0], color[-1]))

            t0 = _getBandMaxTime(bestFit, fitted, bandDict, 'B',
                                 zpMag.band_flux_to_mag(1, bandDict['B']),
                                 zpsys)
            if len(t0) == 1:
                t0 = t0[0]
            else:
                raise RuntimeError('Multiple global maxima in best fit.')
        else:
            if len(blue) > len(red) or bandFit == color[0]:
                fitted = blue
                notFitted = red
                fit = color[0]
            elif len(blue) < len(red) or bandFit == color[-1]:
                fitted = red
                notFitted = blue
                fit = color[-1]
            else:
                raise RuntimeError(
                    'Neither band "%s" nor band "%s" has more points, and '
                    'you have not specified which to fit.'
                    % (color[0], color[-1]))

        #return(bestFit,bestRes,t0,fitted,notFitted)
        tGrid, bestMag = _snmodel_to_mag(bestFit, fitted, zpsys, bandDict[fit])

        ugrid, UMagErr, lgrid, LMagErr = _getErrorFromModel(
            append([bestFit._source.name, fitted], args[1:]), zpsys,
            bandDict[fit])

        # ****RIGHT NOW THE ERROR IS JUST SET TO 10%*****
        tempTable = Table([tGrid - t0, bestMag, bestMag * .1],
                          names=(_get_default_prop_name('time'),
                                 _get_default_prop_name('mag'),
                                 _get_default_prop_name('magerr')))

        interpFunc = scint.interp1d(
            array(tempTable[_get_default_prop_name('time')]),
            array(tempTable[_get_default_prop_name('mag')]))
        minterp = interpFunc(array(notFitted[_get_default_prop_name('time')] - t0))

        interpFunc = scint.interp1d(ugrid - t0, UMagErr)
        uinterp = interpFunc(array(notFitted[_get_default_prop_name('time')] - t0))
        interpFunc = scint.interp1d(lgrid - t0, LMagErr)
        linterp = interpFunc(array(notFitted[_get_default_prop_name('time')] - t0))
        magerr = mean([minterp - uinterp, linterp - minterp], axis=0)

        for i in range(len(minterp)):
            colorTable.add_row(
                append(notFitted[_get_default_prop_name('time')][i] - t0,
                       [1 for j in range(len(colorTable.colnames) - 1)]),
                mask=[True if j > 0 else False
                      for j in range(len(colorTable.colnames))])

        if fit == color[0]:
            colorTable[color] = MaskedColumn(
                append([1 for j in range(len(colorTable) - len(minterp))],
                       minterp - array(notFitted[_get_default_prop_name('mag')])),
                mask=[True if j < (len(colorTable) - len(minterp)) else False
                      for j in range(len(colorTable))])
        else:
            colorTable[color] = MaskedColumn(
                append([1 for j in range(len(colorTable) - len(minterp))],
                       array(notFitted[_get_default_prop_name('mag')]) - minterp),
                mask=[True if j < (len(colorTable) - len(minterp)) else False
                      for j in range(len(colorTable))])

        colorTable[color[0] + color[-1] + '_err'] = MaskedColumn(
            append([1 for j in range(len(colorTable) - len(magerr))],
                   magerr + array(notFitted[_get_default_prop_name('magerr')])),
            mask=[True if j < (len(colorTable) - len(magerr)) else False
                  for j in range(len(colorTable))])

        #colorTable['V-r']=MaskedColumn(append([1 for j in range(len(colorTable)-len(magerr))],[bestFit.color('bessellv','sdss::r',zpsys,t0) for i in range(len(linterp))]),mask=[True if j<(len(colorTable)-len(magerr)) else False for j in range(len(colorTable))])

        tempVRCorr = 0
        for name in bestFit.effect_names:
            magCorr = _unredden(
                color, bandDict,
                bestRes.parameters[bestRes.param_names.index(name + 'ebv')],
                bestRes.parameters[bestRes.param_names.index(name + 'r_v')])
            colorTable[color] -= magCorr
            tempVRCorr += _unredden(
                'V-R', bandDict,
                bestRes.parameters[bestRes.param_names.index(name + 'ebv')],
                bestRes.parameters[bestRes.param_names.index(name + 'r_v')])
            corr1 = _ccm_extinction(
                sncosmo.get_bandpass('besselli').wave_eff,
                bestRes.parameters[bestRes.param_names.index(name + 'ebv')],
                r_v=3.1)
            corr2 = _ccm_extinction(
                sncosmo.get_bandpass('bessellr').wave_eff,
                bestRes.parameters[bestRes.param_names.index(name + 'ebv')],
                r_v=3.1)
            corr3 = _ccm_extinction(
                sncosmo.get_bandpass('bessellb').wave_eff,
                bestRes.parameters[bestRes.param_names.index(name + 'ebv')],
                r_v=3.1)
            corr4 = _ccm_extinction(
                sncosmo.get_bandpass('bessellv').wave_eff,
                bestRes.parameters[bestRes.param_names.index(name + 'ebv')],
                r_v=3.1)

        vr = [x - tempVRCorr
              for x in bestFit.color('bessellv', 'bessellr', zpsys,
                                     arange(t0 - 20, t0 + 100, 1))]
        #vr=(bestFit.bandmag('bessellr',zpsys,arange(t0-20,t0+100,1))-bestFit.bandmag('besselli',zpsys,arange(t0-20,t0+100,1))-(corr2-corr1))/(bestFit.bandmag('bessellb',zpsys,arange(t0-20,t0+100,1))-bestFit.bandmag('bessellv',zpsys,arange(t0-20,t0+100,1))-(corr3-corr4))
        bandFit = None

    colorTable.sort(_get_default_prop_name('time'))
    return (colorTable, vr)
def grabdata(loc, list):
    f = open(list, 'r').readlines()
    files = []
    for file in f:
        files.append(os.path.join(loc, file.strip()))

    lightcurves = []
    hostzs = []
    hostz_stds = []
    snids = []
    sntypes = []
    params = []
    max = 20000
    cntr = 0
    for file in files:
        cntr += 1
        if cntr > max:
            continue
        sn = open(file, 'r').readlines()
        keepgoing = True
        i = 0
        sntype = -999
        snid = -999
        hostz = -999
        hostz_std = -999
        while keepgoing:
            if 'SNID:' in sn[i]:
                snid = sn[i].split()[1]
            if 'SNTYPE:' in sn[i]:
                sntype = sn[i].split()[1]
            if 'HOST_GALAXY_PHOTO-Z:' in sn[i]:
                hostz = float(sn[i].split()[1])
                hostz_std = float(sn[i].split()[3])
                keepgoing = False
            i += 1
        hostzs.append(hostz)
        hostz_stds.append(hostz_std)
        snids.append(snid)
        sntypes.append(sntype)

        sndata = dt.read(file, 23, 24)
        flux = sndata['FLUXCAL']
        fluxerr = sndata['FLUXCALERR']
        filt = sndata['FLT']
        mjd = sndata['MJD']

        badrow = -9
        for i, f, fe, flt, m in zip(range(len(flux)), flux, fluxerr, filt, mjd):
            if 'g+r+i+z' in str(f):
                badrow = i
            elif 'g+r+i+z' in str(fe):
                badrow = i
            elif 'g+r+i+z' in str(flt):
                badrow = i
            elif 'g+r+i+z' in str(m):
                badrow = i
        if badrow != -9:
            flux = np.delete(flux, badrow)
            fluxerr = np.delete(fluxerr, badrow)
            filt = np.delete(filt, badrow)
            mjd = np.delete(mjd, badrow)

        cosmofile = file.split('.')[0] + '.sncosmo'
        out = open(cosmofile, 'w')
        out.write('time band flux fluxerr zp zpsys\n')
        for i, f, fe, flt, m in zip(range(len(flux)), flux, fluxerr, filt, mjd):
            out.write(str(m) + ' des' + str(flt) + ' ' + str(f) + ' '
                      + str(fe) + ' 27.5 ab\n')
        out.close()

        lc = sncosmo.read_lc(cosmofile)
        lightcurves.append(lc)
        params.append({'snid': snid, 'type': sntype, 'hostz': hostz,
                       'hostz_std': hostz_std})
        print(snid, sntype, hostz, hostz_std)

    hostzs = np.array(hostzs)
    hostz_stds = np.array(hostz_std)
    snids = np.array(snids)
    sntypes = np.array(sntypes, dtype='float')
    print('unique types', np.unique(sntypes))

    dsntypes = np.zeros(len(sntypes))
    dsntypes[sntypes > 1] = np.ones(len(sntypes[sntypes > 1]))
    dsntypes[sntypes == 1] = np.zeros(len(sntypes[sntypes == 1]))
    dsntypes[sntypes == -9] = np.zeros(len(sntypes[sntypes == -9])) - 9
    print('nonia', len(dsntypes[dsntypes == 1]))
    print('ia', len(dsntypes[dsntypes == 0]))
    print('none', len(dsntypes[dsntypes == -9]))

    'FEED ONLY LCs WITH NON -9 TYPES THROUGH ALL THE LIGHCURVE FITS AND SPLIT THAT INTO TEST AND TRAIN'
    knownlightcurves = []
    knownparams = []
    knowntypes = []
    blindlightcurves = []
    blindparams = []
    for lc, t, p in zip(lightcurves, dsntypes, params):
        if t == -9:
            blindlightcurves.append(lc)
            blindparams.append(p)
        else:
            knownlightcurves.append(lc)
            knownparams.append(p)
            knowntypes.append(t)

    ndim = 1 + 1 + 1 + len(sources) + 2
    fitparams = np.zeros((len(knownlightcurves), ndim))
    #THIS NEEDS TO CUT OUT -9S
    fitparams, truetypes = lcfit(knownlightcurves, knownparams, fitparams,
                                 knowntypes, sources)
    print('fitparams.shape', fitparams.shape)
    print('truetypes.shape', truetypes.shape)
    np.savez('sncc_known2.npz', big_data_array=fitparams,
             truetypes=truetypes, params=knownparams)
    print('saved sncc_known2.npz')
    return
          x0=5.52396533233e-05, x1=-1.62106970624, c=0.079535505334)

times = np.array([54346.219, 54356.262, 54358.207, 54359.172, 54365.238,
                  54373.313, 54382.246, 54386.25, 54388.254, 54393.238,
                  54403.168, 54406.16, 54412.16, 54416.156, 54420.184,
                  54421.164, 54423.156, 54425.156, 54431.164, 54433.1])

model_rcov = model._bandflux_rcov('sdssg', times)
relerr2 = np.diag(model_rcov) - model_rcov[0, 1]
for i, t in enumerate(times):
    print(i, t, relerr2[i])

data = sncosmo.read_lc('jla_light_curves/lc-SDSS19230.list',
                       format='salt2', read_covmat=True)
# print(data)

data = sncosmo.photdata.photometric_data(data)
mask = sncosmo.fitting._data_mask(data, model.get('t0'), model.get('z'),
                                  (-15., 45.), (3000., 7000.))
# print(len(data[mask]))
# print(sncosmo.chisq(data[mask], model, modelcov=True))
# print(data[mask])

result, fitted_model = sncosmo.fit_lc(data, model, ['t0', 'x0', 'x1', 'c'],
                                      guess_amplitude=False, guess_t0=False,
                                      phase_range=(-15., 45.),
                                      wave_range=(3000., 7000.),
def snname_from_fname(fname):
    return re.match(REGEX, fname).groups()[0]


if __name__ == "__main__":

    model = sncosmo.Model(source='salt2',
                          effects=[sncosmo.F99Dust()],
                          effect_names=['mw'],
                          effect_frames=['obs'])

    fnames = glob.glob("jla_light_curves/lc-SDSS19230.list")
    for fname in fnames[0:1]:
        snname = snname_from_fname(fname)

        data = sncosmo.read_lc(fname, format='salt2', read_covmat=True)
        model.set(mwebv=data.meta['MWEBV'], z=data.meta['Z_HELIO'])

        t0 = time.time()
        result, m = sncosmo.fit_lc(data, model, ['t0', 'x0', 'x1', 'c'],
                                   modelcov=True,
                                   phase_range=(-15., 45.),
                                   wave_range=(3000., 7000.),
                                   verbose=True)
        print("time:", time.time() - t0, 's')
        print(result)

        snfit_result = snfitio.read_snfit_result(
            'results_snfit/result-{}.dat'.format(snname))
        print(snfit_result)
import os

import numpy as np
import sncosmo

lcs = sncosmo.read_lc('../lc.standardsystem.sesn_allphot.dat')
sne = np.unique(lcs['Name'])
for s in sne:
    sncosmo.write_lc(lcs[lcs['Name'] == s], 'lc_' + s + '.dat')
from mpi4py import MPI

import sncosmo
from bolomc import bump, burns

__whatami__ = "Construct a bolometric light curve from CSP data."
__author__ = "Danny Goldstein <*****@*****.**>"

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()

# Shorthand for a few variables with long names.
od94 = sncosmo.OD94Dust
f99 = sncosmo.F99Dust

# Read the LC to fit.
lc = sncosmo.read_lc('scripts/sn2011fe.lc', format='ascii')
name = lc.meta['name']

# Configure the properties of the host galaxy dust.
dust_type = od94
bintype = 'gmm'

# rv, ebv
my_jobs = [(3.1, 0.)]

if len(my_jobs) == 0:
    result = None  # this processor will be idle
    param_names = None
else:
    result = []
    param_names = []
def do_stuff(ctr):
    p = pb.ProgressBar(maxval=740,
                       widgets=[pb.Percentage(), pb.Bar(), pb.ETA()]).start()
    pbctr = 0
    for i, l in enumerate(lc):
        p.update(pbctr)
        pbctr += 1
        restcut = (3000, 7000)
        data = sncosmo.read_lc(l, format='salt2')
        try:
            z = data.meta['Redshift']
        except:
            pass
        try:
            z = data.meta['Z_CMB']
        except:
            pass
        try:
            survey = data.meta['SURVEY']
        except:
            pass
        try:
            survey = data.meta['SET']
        except:
            pass
        nickname = data.meta['SN']
        try:
            nickname = str(int(nickname))
        except:
            pass
        mwebv = data.meta['MWEBV']
        dust = sncosmo.CCM89Dust()

        data = astropy.table.Table(data, masked=True)

        # rename columns so that my fitter can handle things
        data.rename_column('Filter', 'tmp')
        data.rename_column('Date', 'time')
        data.rename_column('Flux', 'flux')
        data.rename_column('Fluxerr', 'fluxerr')
        data.rename_column('MagSys', 'zpsys')
        data.rename_column('ZP', 'zp')

        if survey == 'SNLS':
            sn_nickname = l.split('/')[-1].split('.')[0].split('-')[-1]
            band = []
            for j, bp in enumerate(data['tmp']):
                band.append('%s-%s' % (sn_nickname, bp))
            band = astropy.table.Column(band, name='band')
            data.add_column(band)
            data.remove_column('tmp')
        else:
            data.rename_column('tmp', 'band')

        # deal with swope filters
        mask = (data['band'] == 'SWOPE2::V')
        nswopev = len(mask.nonzero()[0])
        if nswopev > 0:
            band = []
            for j, bp in enumerate(data['band']):
                if (bp == 'SWOPE2::V'):
                    if (data['time'][j] < 53749):
                        band.append('swope2::v_lc3014')
                    elif (data['time'][j] < 53760):
                        band.append('swope2::v_lc3009')
                    else:
                        band.append('swope2::v_lc9844')
                else:
                    band.append(bp)
            data.remove_column('band')
            band = astropy.table.Column(band, name='band')
            data.add_column(band)

        ind = np.where((data['band'] == 'SWOPE2::V') &
                       (data['time'] > 53749.) &
                       ((data['time'] <= 53760.)))
        data['band'][ind] = 'swope2::v_lc3009'
        ind = np.where((data['band'] == 'SWOPE2::V') &
                       (data['time'] > 53760.))
        data['band'][ind] = 'swope2::v_lc9844'
        # print ind

        # deal with filter coverage
        # also deal with STANDARD filter zeropoints
        unique_bands = np.unique(data['band'])
        fit_bands = []
        nofit_bands = []
        # print unique_bands
        tmperr = np.copy(data['fluxerr'])
        for ub in unique_bands:
            # print ub
            bp = sncosmo.get_bandpass(ub)
            rest = bp.wave_eff / (1.0 + z)
            if (rest >= restcut[0]) & (rest <= restcut[1]):
                fit_bands.append(ub)
            else:
                nofit_bands.append(ub)
            if 'STANDARD' in ub:
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(standard_zps[ub])
                errcor = 10**(-0.4 * standard_zps[ub])
                data['fluxerr'][ind] *= errcor
            if '4SHOOTER2' in ub:
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(FourShooter_zps[ub])
                errcor = 10**(-0.4 * FourShooter_zps[ub])
                data['fluxerr'][ind] *= errcor
            if 'KEPLERCAM' in ub:
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(keplercam_zps[ub])
                errcor = 10**(-0.4 * keplercam_zps[ub])
                data['fluxerr'][ind] *= errcor
                # print ub
                # print data['zp'][ind]
            if 'swope' in ub.lower():
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(swope_zps[ub])
                errcor = 10**(-0.4 * swope_zps[ub])
                data['fluxerr'][ind] *= errcor
            if 'sdss' in ub.lower():
                ind = np.where(data['band'] == ub)
                data['zp'][ind] = data['zp'][ind] - float(sdss_zps[ub])
                errcor = 10**(-0.4 * sdss_zps[ub])
                data['fluxerr'][ind] *= errcor

        for nfb in nofit_bands:
            mask = data['band'] == nfb
            for c in data.colnames:
                data[c].mask = (data[c].mask | mask)

        mwebv = data.meta['MWEBV']
        mask = data['band'].mask.nonzero()[0]
        data.remove_rows(mask)

        ind = np.where(lcfits['SN'] == nickname)
        t0 = lcfits['DayMax'][ind][0]

        x1r, x1f, c = np.random.multivariate_normal(mean, cov, size=1)[0]
        mu = cosmo.distmod(z).value
        absmag = (MB - ar * x1r - af * x1f + beta * c
                  + np.random.normal(scale=sigint, size=1)[0])
        mB = mu + absmag

        source = Salt2XSource(version='2.4', modeldir=modeldir)
        model = sncosmo.Model(source=source, effects=[dust],
                              effect_names=['mw'], effect_frames=['obs'])

        model.set(z=z, x1=x1f, s=x1r, c=c, t0=t0, mwebv=mwebv)
        model.set_source_peakabsmag(absmag, 'bessellb', 'ab')

        flux = model.bandflux(data['band'], data['time'],
                              zp=data['zp'], zpsys=data['zpsys'])
        whocares, saltcov = model.bandfluxcov(data['band'], data['time'],
                                              data['zp'], data['zpsys'])

        # handle model cov blowups
        saltcov = np.copy(saltcov)
        diag = np.copy(saltcov.diagonal())
        model_snr = whocares / np.sqrt(diag)
        ind = np.where((np.abs(model_snr) < 1) & ~np.isnan(model_snr))
        diag[ind] = diag[ind] * np.abs(model_snr[ind])**2
        np.fill_diagonal(saltcov, diag)

        diagerr = np.diag(data['fluxerr']**2)
        fullcov = saltcov + diagerr
        try:
            np.linalg.cholesky(fullcov)
        except:
            print('Cholesky failed... exiting')
            sys.exit()

        noise = np.random.multivariate_normal(np.zeros(len(diagerr)),
                                              fullcov, size=1)[0]

        # lower zp, lower flux
        data['flux'] = flux + noise

        data.meta['x1r'] = x1r
        data.meta['x1f'] = x1f
        data.meta['c'] = c
        data.meta['alpha_r'] = ar
        data.meta['alpha_f'] = af
        data.meta['beta'] = beta
        data.meta['MB'] = MB
        data.meta['mB'] = mu + MB
        data.meta['DayMax'] = t0
        data.meta['cosmology'] = 'Planck15'

        data.rename_column('band', 'Filter')
        data.rename_column('time', 'Date')
        data.rename_column('flux', 'Flux')
        data.rename_column('fluxerr', 'Fluxerr')
        data.rename_column('zpsys', 'MagSys')
        data.rename_column('zp', 'ZP')

        if survey == 'SNLS':
            for row in data:
                tmp = row['Filter']
                ind = tmp.find('MEGACAM')
                row['Filter'] = row['Filter'][ind:]

        sncosmo.write_lc(data, './cadence_sim/lc/%s_%s.list' % (nickname, ctr),
                         format='salt2')

    p.finish()