def read_light_curves_from_field(field, ccd=None, N_per_ccd=0, clean=True, randomize=False):
    """ Read some number of light curves from a given PTF field

        Parameters
        ----------
        field : int, pdb.Field
            The PTF field
        ccd : int, pdb.CCD (optional)
            The PTF CCD to grab light curves from. If None, uses all CCDs
        N_per_ccd : int
            The number of light curves to grab PER CCD. 0 means all.
        clean : bool
            Clean the light curves and require > min_number_good_observations.
        randomize : bool
            Grab random light curves vs. consecutive light curves.

        Returns
        -------
        list
            A flat list of light-curve objects gathered from all requested CCDs.
    """
    field = pdb.Field(field, "R")

    if ccd is not None:
        ccds = [pdb.CCD(ccd, field, field.filter)]
    else:
        ccds = field.ccds.values()

    all_light_curves = []
    # NOTE: use a distinct loop name so we don't shadow the `ccd` parameter.
    for this_ccd in ccds:
        ccd_light_curves = []
        chip = this_ccd.read()

        # Only consider sources with enough good observations to be useful.
        sources = chip.sources.readWhere("(ngoodobs > {})".format(
            pg.min_number_of_good_observations))
        if randomize:
            np.random.shuffle(sources)

        for source in sources:
            # BUGFIX: honor the `clean` parameter instead of hard-coding True.
            # clean applies a quality cut to the data
            light_curve = this_ccd.light_curve(
                source["matchedSourceID"], barebones=True, clean=clean)

            # BUGFIX: check for a missing/short light curve BEFORE touching
            # its attributes -- the original set db_indices first, which would
            # raise AttributeError on a None return.
            if light_curve is None or len(
                    light_curve) < pg.min_number_of_good_observations:
                # If the light curve is not found, or has too few observations, skip this source_id
                continue

            # `indices` is a module-level list of statistic names (e.g. "eta").
            light_curve.db_indices = [
                pu.source_index_name_to_pdb_index(source, index)
                for index in indices
            ]
            ccd_light_curves.append(light_curve)

            # N_per_ccd == 0 means "no limit"; otherwise stop once we have enough.
            if N_per_ccd != 0 and len(ccd_light_curves) >= N_per_ccd:
                break

        this_ccd.close()
        all_light_curves += ccd_light_curves

    return all_light_curves
def plot_best_light_curve(field_id, ccd_id):
    """Plot the best light curve for one field/CCD and save it as a PDF.

    Looks up the field in R band, fetches its best light curve via
    get_best_light_curve(), draws it on a single wide axis, and writes
    the figure to plots/new_detection_efficiency/.
    """
    from pylab import ScalarFormatter

    the_field = pdb.Field(field_id, "R")
    best_lc = get_best_light_curve(the_field, ccd_id)

    figure = plt.figure(figsize=(15, 5))
    axis = figure.add_subplot(111)
    best_lc.plot(axis)

    # Plain (non-offset) tick labels on the magnitude axis.
    axis.yaxis.set_major_formatter(ScalarFormatter(False))
    axis.set_xlabel("MJD", fontsize=20)
    axis.set_ylabel("$R$ [mag]", fontsize=20)

    filename = "plots/new_detection_efficiency/example_light_curve_f{}_ccd{}.pdf".format(the_field.id, ccd_id)
    figure.savefig(filename, bbox_inches="tight")
# Script entry: parse CLI arguments, set log verbosity, optionally run the
# self-tests, then fit the microlensing model to a single light curve.
args = parser.parse_args()

# Map mutually-exclusive verbosity flags onto the logger level.
if args.verbose:
    logger.setLevel(logging.DEBUG)
elif args.quiet:
    logger.setLevel(logging.ERROR)
else:
    logger.setLevel(logging.INFO)

if args.test:
    # Run the module self-tests and quit without fitting anything.
    test_magnitude_model()
    test_ln_likelihood()
    test_ln_prior()
    sys.exit(0)

# Load the requested light curve (R band) from the photometric database.
field = pdb.Field(args.field_id, "R")
ccd = field.ccds[args.ccd_id]
light_curve = ccd.light_curve(args.source_id, clean=args.clean, barebones=True)

# HACK: special-case source 5466 -- keep only points fainter than R = 18.25.
# Presumably trims a known contaminated bright tail; TODO confirm why.
if args.source_id == 5466:
    idx = light_curve.mag > 18.25
    light_curve.mjd = light_curve.mjd[idx]
    light_curve.mag = light_curve.mag[idx]
    light_curve.error = light_curve.error[idx]

# Run the MCMC fit, then keep the last 100 steps per walker from the
# flattened chain as the posterior sample.
sampler = fit_model_to_light_curve(light_curve, nwalkers=args.walkers, nsamples=args.steps, nburn_in=args.burn_in)
end_chain = sampler.flatchain[-args.walkers * 100:]
logger.setLevel(logging.INFO) #indices = ["eta","sigma_mu","j","k", "delta_chi_squared"] indices = ["eta","delta_chi_squared", "j"] config = dict() config["number_of_fpr_light_curves"] = args.limit config["number_of_fpr_simulations_per_light_curve"] = args.N config["number_of_microlensing_light_curves"] = args.limit*10 config["number_of_microlensing_simulations_per_light_curve"] = args.N np.random.seed(42) if args.plot: plot_best_light_curve(args.field_id, 2) field = pdb.Field(int(args.field_id), filter="R") simulated_microlensing_statistics, selected_distributions = detection_efficiency_for_field(field, \ config=config, \ overwrite=args.overwrite, \ indices=indices, plot=args.plot) sys.exit(0) fig, axes = plt.subplots(1, 1, sharex=True, figsize=(15,10)) for ii,field_id in enumerate([4588, 100152, 3756]): field = pdb.Field(field_id, "R") lc = get_best_light_curve(field, 2) axes.plot(lc.mjd, [ii]*len(lc.mjd), "ko", ms=6, alpha=0.15) axes.text(55200, ii+0.1, str(field_id)) axes.set_ylim(-0.2, 2.2) fig.savefig("plots/new_detection_efficiency/example_light_curves.png")
from __future__ import division

# Timing comparison: pdb.quality_cut() vs. an equivalent hand-written
# PyTables where-query followed by a numpy boolean mask.

import sys
import time

import numpy as np

import ptf.db.photometric_database as pdb

# Module-level fixture: source data for field 4588, CCD 0, R band.
# NOTE: opening the database is a side effect of importing this module.
field = pdb.Field(4588, "R")
ccd = field.ccds[0]
chip = ccd.read()
sourcedata = chip.sourcedata

def test_quality_cut(source_id):
    """Time pdb.quality_cut() for one source; return elapsed seconds."""
    a = time.time()
    srcdata = pdb.quality_cut(sourcedata, source_id=source_id)
    return time.time() - a

def test_where(source_id):
    """Time the manual where-query + mask equivalent of quality_cut().

    NOTE(review): unlike test_quality_cut this does not return the elapsed
    time -- the function may be truncated here; confirm against the
    original file.
    """
    wheres = "(matchedSourceID == {})".format(source_id)
    a = time.time()
    data = [x.fetch_all_fields() for x in sourcedata.where(wheres)]
    data = np.array(data, dtype=sourcedata.dtype)
    # Presumably mirrors the cuts in pdb.quality_cut: trim image edges,
    # photometric/SExtractor/IPAC flag bitmasks, magnitude range, and
    # finite magnitudes -- TODO confirm the masks stay in sync.
    data = data[(data["x_image"] > 15) & (data["x_image"] < 2033) & \
                (data["y_image"] > 15) & (data["y_image"] < 4081) & \
                (data["relPhotFlags"] < 4) & \
                (data["mag"] > 14.3) & (data["mag"] < 21) & \
                ((data["sextractorFlags"] & 251) == 0) & \
                ((data["ipacFlags"] & 6077) == 0) & \
                np.isfinite(data["mag"])]
        # (tail of a call whose opening lines are above this chunk)
        len(field_ids)))

if args.field_range:
    # --field-range "MIN-MAX": restrict to well-observed fields in [MIN, MAX).
    min_field_id, max_field_id = map(int, args.field_range.split("-"))
    all_fields = np.load("data/survey_coverage/fields_observations_R.npy")
    field_ids = all_fields[all_fields["num_exposures"] > \
        min_number_of_good_observations]["field"]
    field_ids = field_ids[(field_ids >= min_field_id) & \
        (field_ids < max_field_id)]

for field_id in sorted(field_ids):
    # Skip field 101001 because the data hasn't been reduced by the PTF pipeline?
    if field_id == 101001:
        continue

    field = pdb.Field(field_id, "R")
    logger.info("Field: {}".format(field.id))

    # There is some strangeness to getting the coordinates for a Field
    # -- this just makes it stupidproof
    try:
        if field.ra == None or field.dec == None:
            raise AttributeError()
    except AttributeError:
        logger.warn("Failed to get coordinates for this field!")
        continue

    # See if field is in database, remove it if we need to overwrite
    if args.overwrite:
        field_collection.remove({"_id" : field.id})
        #light_curve_collection.remove({"field_id" : field.id})
def fields(self, min_num_observations):
    """ Return a list of fields with at least the given number of observations.

        Parameters
        ----------
        min_num_observations : int
            Minimum number of exposures a field must have (inclusive).

        Returns
        -------
        list
            pdb.Field objects, excluding any whose coordinates are unknown.
    """
    rows = self._fields_exposures[self._fields_exposures["num_exposures"] >= min_num_observations]
    fields = [pdb.Field(row["field"], self.filter, number_of_exposures=row["num_exposures"])
              for row in rows]
    # Drop fields with no coordinates (Field.ra is None when metadata is
    # missing) -- use `is not None`, not `!= None`.
    return [f for f in fields if f.ra is not None]
# If the light curve is not found, or has too few observations, skip this source_id continue ccd_light_curves.append(light_curve) if N_per_ccd != 0 and len(ccd_light_curves) >= N_per_ccd: break ccd.close() all_light_curves += ccd_light_curves return all_light_curves # Configuration parameters indices = ["eta"] field = pdb.Field(3376, "R") num_trials = 25 N_per_ccd = 1000 logfile = open( os.path.join( os.path.split(pg._base_path)[0], "tests", "num_simulations_{0}.log".format(field.id)), "w") # Start the test all_light_curves = read_light_curves_from_field(field, N_per_ccd=N_per_ccd) print("Read in {0} light curves from Field {1} on {2} CCDs".format( len(all_light_curves), field.id, len(field.ccds)), file=logfile) all_eta_lower_criteria = dict()