def store_config(config, dataset_id):
    """
    Store a config into the database.

    Args:
        config (dict): nested dict containing the config,
                       [section][key] -> [value]
    """
    logger.info("storing config to database for dataset %s" % dataset_id)
    error = "value %s (key %s in section %s) has type %s, we only support %s"
    for section, v in config.items():
        for key, value in v.items():
            if key == 'password':
                logger.debug("not storing %s password to DB" % section)
                continue
            if type(value) not in types:
                msg = error % (value, key, section, type(value).__name__,
                               ", ".join(t.__name__ for t in types))
                logger.error(msg)
                raise TypeError(msg)
            values = {'dataset': dataset_id, 'section': section, 'key': key,
                      'value': str(value), 'type': type(value).__name__}
            execute(store_query, values)
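# store_config/fetch_config rely on two module-level names that are not
# shown here: `types`, the whitelist of storable value types, and
# `store_query`, the parameterized INSERT statement. A minimal sketch of
# what they might look like (an assumption -- the real module may define
# them differently), with the column names taken from the tests below:
types = (bool, int, float, str)

store_query = """\
INSERT INTO config (dataset, section, key, value, type)
VALUES (%(dataset)s, %(section)s, %(key)s, %(value)s, %(type)s)
"""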
def test_bad_type_in_db(self):
    """ fetch_config should raise TypeError if invalid type in DB """
    q = """\
    INSERT INTO config (dataset, section, key, value, type)
    VALUES (%s, 'bad', 'type', '[]', 'list');
    """
    execute(q, (self.dataset_id,))
    self.assertRaises(TypeError, fetch_config, self.dataset_id)
def test_config_store(self):
    store_config(config, self.dataset_id)
    query1 = """\
    SELECT count(id) FROM config
    WHERE section=%s AND key=%s AND value=%s AND dataset=%s
    """
    args1 = ('section1', 'key1', 'value1', self.dataset_id)
    # fetch the count itself: the rowcount of a SELECT count(...) is
    # always 1, so asserting on it would pass regardless of the data
    num1 = execute(query1, args1).fetchone()[0]
    self.assertEqual(num1, 1)
    args2 = ('section2', 'key3', 'value3', self.dataset_id)
    num2 = execute(query1, args2).fetchone()[0]
    self.assertEqual(num2, 1)
def _update_monitor_runcats(image_id):
    """
    Update the ``runcat`` column of the ``monitor`` table for newly
    extracted positions.
    """
    # The same subquery appears twice: the EXISTS clause restricts the
    # update to rows that actually have a match, so the SET subquery
    # never assigns NULL.
    query = """\
UPDATE monitor
   SET runcat = (SELECT rc.id
                   FROM runningcatalog rc
                   JOIN extractedsource ex ON rc.xtrsrc = ex.id
                  WHERE monitor.runcat IS NULL
                    AND ex.image = %(image_id)s
                    AND ex.ff_monitor = monitor.id
                )
 WHERE EXISTS (SELECT rc.id
                 FROM runningcatalog rc
                 JOIN extractedsource ex ON rc.xtrsrc = ex.id
                WHERE monitor.runcat IS NULL
                  AND ex.image = %(image_id)s
                  AND ex.ff_monitor = monitor.id
              )
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    up = cursor.rowcount
    logger.debug("Updated runcat cols for %s newly monitored sources" % up)
@classmethod
def setUpClass(cls):
    # NB This is basically nonsense, we're just selecting any old
    # extracted source so we can check the feature extraction syntax.
    xtrsrc_qry = """SELECT id AS trigger_xtrsrc FROM extractedsource"""
    cursor = execute(xtrsrc_qry)
    cls.transients = get_db_rows_as_dicts(cursor)
def _insert_new_runcat(image_id):
    """Insert the fits of the monitoring sources as new sources into the
    runningcatalog
    """
    query = """\
INSERT INTO runningcatalog
  (xtrsrc, dataset, datapoints, zone, wm_ra, wm_decl
  ,avg_ra_err, avg_decl_err, wm_uncertainty_ew, wm_uncertainty_ns
  ,avg_wra, avg_wdecl, avg_weight_ra, avg_weight_decl
  ,x, y, z, mon_src
  )
  SELECT x.id AS xtrsrc
        ,i.dataset
        ,1 AS datapoints
        ,x.zone
        ,x.ra AS wm_ra
        ,x.decl AS wm_decl
        ,x.ra_err AS avg_ra_err
        ,x.decl_err AS avg_decl_err
        ,x.uncertainty_ew AS wm_uncertainty_ew
        ,x.uncertainty_ns AS wm_uncertainty_ns
        ,x.ra / (x.uncertainty_ew * x.uncertainty_ew) AS avg_wra
        ,x.decl / (x.uncertainty_ns * x.uncertainty_ns) AS avg_wdecl
        ,1 / (x.uncertainty_ew * x.uncertainty_ew) AS avg_weight_ra
        ,1 / (x.uncertainty_ns * x.uncertainty_ns) AS avg_weight_decl
        ,x.x
        ,x.y
        ,x.z
        ,TRUE
    FROM image i
        ,extractedsource x
         LEFT OUTER JOIN temprunningcatalog t
         ON t.xtrsrc = x.id
   WHERE i.id = %(image_id)s
     AND x.image = i.id
     AND x.extract_type = 2
     AND t.xtrsrc IS NULL
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    ins = cursor.rowcount
    if ins > 0:
        logger.info("Added %s new monitoring sources to runningcatalog" % ins)
def _insert_new_runcat_flux(image_id):
    """Insert the fitted fluxes of the monitoring sources as new datapoints
    into runningcatalog_flux.

    Extracted sources for which no counterpart was found in the
    runningcatalog, i.e. those that do not have an entry in the tempruncat
    table, will be added as new sources to the runningcatalog_flux table.
    """
    query = """\
INSERT INTO runningcatalog_flux
  (runcat, band, stokes, f_datapoints
  ,avg_f_peak, avg_f_peak_sq, avg_f_peak_weight
  ,avg_weighted_f_peak, avg_weighted_f_peak_sq
  ,avg_f_int, avg_f_int_sq, avg_f_int_weight
  ,avg_weighted_f_int, avg_weighted_f_int_sq
  )
  SELECT rc.id
        ,i.band
        ,i.stokes
        ,1 AS f_datapoints
        ,x.f_peak
        ,x.f_peak * x.f_peak
        ,1 / (x.f_peak_err * x.f_peak_err)
        ,x.f_peak / (x.f_peak_err * x.f_peak_err)
        ,x.f_peak * x.f_peak / (x.f_peak_err * x.f_peak_err)
        ,x.f_int
        ,x.f_int * x.f_int
        ,1 / (x.f_int_err * x.f_int_err)
        ,x.f_int / (x.f_int_err * x.f_int_err)
        ,x.f_int * x.f_int / (x.f_int_err * x.f_int_err)
    FROM image i
    JOIN extractedsource x ON i.id = x.image
    JOIN monitor mon ON x.ff_monitor = mon.id
    JOIN runningcatalog rc ON rc.xtrsrc = x.id
   WHERE i.id = %(image_id)s
     AND x.extract_type = 2
     AND mon.runcat IS NULL
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    ins = cursor.rowcount
    if ins > 0:
        logger.debug("Added %s new monitoring fluxes to runningcatalog_flux"
                     % ins)
def _insert_1_to_1_assoc():
    """
    The runcat-monitoring pairs are appended to the assocxtrsource
    (light-curve) table as a type = 9 datapoint.
    """
    cursor = execute(ONE_TO_ONE_ASSOC_QUERY, {'type': 9}, commit=True)
    cnt = cursor.rowcount
    logger.debug(
        "Inserted %s runcat-monitoring source pairs in assocxtrsource" % cnt)
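# ONE_TO_ONE_ASSOC_QUERY is a shared module-level constant that is not shown
# here. Judging from the fully spelled-out type = 9 variant further down, it
# is presumably the same INSERT ... SELECT over temprunningcatalog with the
# type literal replaced by a %(type)s parameter. A sketch of the assumed
# shape only -- the statistics subquery is deliberately elided; see the
# expanded variant below for its full text:
ONE_TO_ONE_ASSOC_QUERY = """\
INSERT INTO assocxtrsource
  (runcat, xtrsrc, type, distance_arcsec, r, v_int, eta_int)
  SELECT t.runcat
        ,t.xtrsrc
        ,%(type)s AS type
        ,0 AS distance_arcsec
        ,0 AS r
        ,t.v_int_inter / t.avg_f_int
        ,t.eta_int_inter / t.avg_f_int_weight
    FROM (/* statistics over temprunningcatalog, as in the
            expanded variant below */) t
"""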
def _insert_new_runcat(image_id):
    """Insert the fits of the monitoring sources as new sources into the
    runningcatalog
    """
    query = """\
INSERT INTO runningcatalog
  (xtrsrc, dataset, datapoints, zone, wm_ra, wm_decl
  ,avg_ra_err, avg_decl_err, wm_uncertainty_ew, wm_uncertainty_ns
  ,avg_wra, avg_wdecl, avg_weight_ra, avg_weight_decl
  ,x, y, z, mon_src
  )
  SELECT x.id AS xtrsrc
        ,i.dataset
        ,1 AS datapoints
        ,x.zone
        ,x.ra AS wm_ra
        ,x.decl AS wm_decl
        ,x.ra_err AS avg_ra_err
        ,x.decl_err AS avg_decl_err
        ,x.uncertainty_ew AS wm_uncertainty_ew
        ,x.uncertainty_ns AS wm_uncertainty_ns
        ,x.ra / (x.uncertainty_ew * x.uncertainty_ew) AS avg_wra
        ,x.decl / (x.uncertainty_ns * x.uncertainty_ns) AS avg_wdecl
        ,1 / (x.uncertainty_ew * x.uncertainty_ew) AS avg_weight_ra
        ,1 / (x.uncertainty_ns * x.uncertainty_ns) AS avg_weight_decl
        ,x.x
        ,x.y
        ,x.z
        ,TRUE
    FROM image i
    JOIN extractedsource x ON i.id = x.image
    JOIN monitor mon ON x.ff_monitor = mon.id
   WHERE i.id = %(image_id)s
     AND x.extract_type = 2
     AND mon.runcat IS NULL
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    ins = cursor.rowcount
    if ins > 0:
        logger.debug("Added %s new monitoring sources to runningcatalog" % ins)
def _insert_1_to_1_assoc():
    """
    The null detection forced fits are appended to the assocxtrsource
    (light-curve) table as a type = 7 datapoint.

    The statistics subtable (t1) has to guard against values and differences
    becoming so small that they would cause divisions by zero.
    """
    cursor = execute(ONE_TO_ONE_ASSOC_QUERY, {'type': 7}, commit=True)
    cnt = cursor.rowcount
    logger.debug("Inserted %s 1-to-1 null detections in assocxtrsource" % cnt)
def _increment_forcedfits_count():
    """
    Increment the forcedfits count for every runningcatalog entry in the
    temprunningcatalog table.
    """
    query = """\
UPDATE runningcatalog
   SET forcedfits_count = forcedfits_count + 1
 WHERE id IN (SELECT t.runcat
                FROM temprunningcatalog t
                    ,runningcatalog r
               WHERE t.runcat = r.id
             )
"""
    execute(query)
def _insert_1_to_1_assoc():
    """
    The runcat-monitoring pairs are appended to the assocxtrsource
    (light-curve) table as a type = 9 datapoint.
    """
    query = """\
INSERT INTO assocxtrsource
  (runcat, xtrsrc, type, distance_arcsec, r, v_int, eta_int)
  SELECT t.runcat
        ,t.xtrsrc
        ,9 AS type
        ,0 AS distance_arcsec
        ,0 AS r
        ,t.v_int_inter / t.avg_f_int
        ,t.eta_int_inter / t.avg_f_int_weight
    FROM (SELECT runcat
                ,xtrsrc
                ,CASE WHEN avg_f_int = 0.0
                      THEN 0.000001
                      ELSE avg_f_int
                 END AS avg_f_int
                ,avg_f_int_weight
                ,CASE WHEN f_datapoints = 1
                      THEN 0
                      ELSE CASE WHEN ABS(avg_f_int_sq - avg_f_int * avg_f_int) < 8e-14
                                THEN 0
                                ELSE SQRT(CAST(f_datapoints AS DOUBLE PRECISION)
                                          * (avg_f_int_sq - avg_f_int * avg_f_int)
                                          / (CAST(f_datapoints AS DOUBLE PRECISION) - 1.0)
                                         )
                           END
                 END AS v_int_inter
                ,CASE WHEN f_datapoints = 1
                      THEN 0
                      ELSE (CAST(f_datapoints AS DOUBLE PRECISION)
                            / (CAST(f_datapoints AS DOUBLE PRECISION) - 1.0))
                           * (avg_f_int_weight * avg_weighted_f_int_sq
                              - avg_weighted_f_int * avg_weighted_f_int)
                 END AS eta_int_inter
            FROM temprunningcatalog
         ) t
"""
    cursor = execute(query, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.info("Inserted %s runcat-monitoring source pairs "
                    "in assocxtrsource" % cnt)
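# The nested CASE expressions above compute the variability metrics from the
# running sums kept in temprunningcatalog. A plain-Python mirror of that
# computation (a sketch: it reproduces the SQL, including the zero-flux and
# small-variance guards, and like the SQL it assumes the variance term is
# not significantly negative):
from math import sqrt

def variability_metrics(f_datapoints, avg_f_int, avg_f_int_sq,
                        avg_f_int_weight, avg_weighted_f_int,
                        avg_weighted_f_int_sq):
    n = float(f_datapoints)
    # guard: substitute a tiny flux so the final division cannot blow up
    avg = avg_f_int if avg_f_int != 0.0 else 0.000001
    if f_datapoints == 1:
        v_inter, eta_inter = 0.0, 0.0
    else:
        variance = avg_f_int_sq - avg_f_int * avg_f_int
        v_inter = (0.0 if abs(variance) < 8e-14
                   else sqrt(n * variance / (n - 1.0)))
        eta_inter = (n / (n - 1.0)) * (avg_f_int_weight * avg_weighted_f_int_sq
                                       - avg_weighted_f_int ** 2)
    return v_inter / avg, eta_inter / avg_f_int_weight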
def _insert_runcat_flux():
    """Monitoring sources that had not yet been fitted in this frequency
    band are appended to runningcatalog_flux. These have their first
    f_datapoint.
    """
    query = """\
INSERT INTO runningcatalog_flux
  (runcat, band, stokes, f_datapoints
  ,avg_f_peak, avg_f_peak_sq, avg_f_peak_weight
  ,avg_weighted_f_peak, avg_weighted_f_peak_sq
  ,avg_f_int, avg_f_int_sq, avg_f_int_weight
  ,avg_weighted_f_int, avg_weighted_f_int_sq
  )
  SELECT runcat
        ,band
        ,stokes
        ,f_datapoints
        ,avg_f_peak
        ,avg_f_peak_sq
        ,avg_f_peak_weight
        ,avg_weighted_f_peak
        ,avg_weighted_f_peak_sq
        ,avg_f_int
        ,avg_f_int_sq
        ,avg_f_int_weight
        ,avg_weighted_f_int
        ,avg_weighted_f_int_sq
    FROM temprunningcatalog
   WHERE f_datapoints = 1
"""
    cursor = execute(query, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.debug(
            "Inserted new-band fluxes for %s monitoring sources "
            "in runcat_flux" % cnt)
def _insert_new_1_to_1_assoc(image_id):
    """
    The forced fits of the monitoring sources which are new are appended
    to the assocxtrsource (light-curve) table as a type = 8 datapoint.
    """
    query = """\
INSERT INTO assocxtrsource
  (runcat, xtrsrc, type, distance_arcsec, r, v_int, eta_int, f_datapoints)
  SELECT rc.id
        ,rc.xtrsrc
        ,8 AS type
        ,0
        ,0
        ,0 AS v_int
        ,0 AS eta_int
        ,1 AS f_datapoints
    FROM runningcatalog rc
    JOIN extractedsource x ON rc.xtrsrc = x.id
    JOIN image i ON x.image = i.id
    JOIN monitor mon ON x.ff_monitor = mon.id
   WHERE i.id = %(image_id)s
     AND mon.runcat IS NULL
     AND x.extract_type = 2
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.debug(
            "Inserted %s new runcat-monitoring source pairs "
            "in assocxtrsource" % cnt)
def test_median(self):
    if Database().engine == 'monetdb':
        qry = """\
        SELECT sys.median(id) AS median_id
              ,sys.median(rms_max) AS median_rms_max
          FROM image
         WHERE dataset = %(dataset_id)s
        """
    else:
        qry = """\
        SELECT median(id) AS median_id
              ,median(rms_max) AS median_rms_max
          FROM image
         WHERE dataset = %(dataset_id)s
        """
    cursor = execute(qry, {'dataset_id': self.dataset.id})
    results = db_subs.get_db_rows_as_dicts(cursor)
    # self.assertAlmostEqual(results[0]['median_id'],
    #                        median(self.image_ids))
    self.assertAlmostEqual(results[0]['median_rms_max'],
                           median([p['rms_max'] for p in self.im_params]))
def fetch_config(dataset_id):
    """
    Retrieve the stored config for given dataset id

    Returns:
        nested dict [section][key] -> [value]
    """
    logger.info("fetching config from database for dataset %s" % dataset_id)
    error = "type in database is %s but we only support %s"
    result = execute(fetch_query, {'dataset': dataset_id}).fetchall()
    config = adict()
    # look each stored type name up in the whitelist rather than eval()ing it
    type_map = {t.__name__: t for t in types}
    for section, key, value, type_ in result:
        if type_ not in type_map:
            msg = error % (type_, ", ".join(t.__name__ for t in types))
            logger.error(msg)
            raise TypeError(msg)
        converted = type_map[type_](value)
        if section not in config:
            config[section] = adict()
        config[section][key] = converted
    return config
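# A round-trip usage sketch for store_config/fetch_config. Section and key
# names here are illustrative, not taken from a real pipeline config:
config = {
    'database': {'host': 'localhost', 'port': 5432, 'password': 'secret'},
    'source_extraction': {'detection_threshold': 8.0},
}
store_config(config, 42)          # the 'password' key is skipped on store
restored = fetch_config(42)
assert restored['source_extraction']['detection_threshold'] == 8.0
assert 'password' not in restored['database']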
def get_monitor_entries(dataset_id):
    """
    Returns the ``monitor`` entries relevant to this dataset.

    Args:
        dataset_id (int): Parent dataset.

    Returns:
        list of tuples [(monitor_id, ra, decl)]
    """
    query = """\
SELECT id
      ,ra
      ,decl
  FROM monitor
 WHERE dataset = %(dataset_id)s
"""
    qry_params = {'dataset_id': dataset_id}
    cursor = execute(query, qry_params)
    res = cursor.fetchall()
    return res
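# Typical use: the (id, ra, decl) tuples drive forced fits at the monitored
# positions. A sketch of a caller (perform_forced_fits is a placeholder
# name, not part of this module):
entries = get_monitor_entries(dataset_id)
positions = [(ra, decl) for monitor_id, ra, decl in entries]
if positions:
    perform_forced_fits(positions, image_id)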
def _insert_new_1_to_1_assoc(image_id):
    """
    The forced fits of the monitoring sources which are new are appended
    to the assocxtrsource (light-curve) table as a type = 8 datapoint.
    """
    query = """\
INSERT INTO assocxtrsource
  (runcat, xtrsrc, type, distance_arcsec, r, v_int, eta_int)
  SELECT r0.id AS runcat
        ,r0.xtrsrc
        ,8 AS type
        ,0 AS distance_arcsec
        ,0 AS r
        ,0 AS v_int
        ,0 AS eta_int
    FROM (SELECT x1.id AS xtrsrc
            FROM extractedsource x1
                 LEFT OUTER JOIN temprunningcatalog trc1
                 ON x1.id = trc1.xtrsrc
           WHERE x1.image = %(image_id)s
             AND x1.extract_type = 2
             AND trc1.xtrsrc IS NULL
         ) t0
        ,runningcatalog r0
   WHERE r0.xtrsrc = t0.xtrsrc
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.info("Inserted %s new runcat-monitoring source pairs "
                    "in assocxtrsource" % cnt)
def get_nulldetections(image_id):
    """
    Returns the runningcatalog sources which:

      * Are associated with the skyregion of the current image.
      * Do not have a counterpart in the extractedsources of the current
        image after source association has run.
      * Have been seen (in any band) at a timestamp earlier than that of
        the current image.

    NB This runs *after* the source association.

    We determine null detections only as those sources which have been seen
    at earlier times which don't appear in the current image. Sources which
    have not been seen earlier, and which appear in different bands at the
    current timestep, are *not* null detections, but are considered as
    "new" sources.

    Returns:
        list of tuples [(runcatid, ra, decl)]
    """
    # The first temptable t0 looks for runcat sources that have been seen
    # in the same sky region as the current image,
    # but at an earlier timestamp, irrespective of the band.
    # The second temptable t1 returns the runcat source ids for those
    # sources that have an association with the current extracted sources.
    # The left outer join in combination with the t1.runcat IS NULL then
    # returns the runcat sources that could not be associated.
    query = """\
SELECT t0.id
      ,t0.wm_ra
      ,t0.wm_decl
  FROM (SELECT r0.id
              ,r0.wm_ra
              ,r0.wm_decl
          FROM image i0
              ,assocskyrgn a0
              ,runningcatalog r0
              ,extractedsource x0
              ,image i1
         WHERE i0.id = %(image_id)s
           AND a0.skyrgn = i0.skyrgn
           AND r0.id = a0.runcat
           AND x0.id = r0.xtrsrc
           AND i1.id = x0.image
           AND i0.taustart_ts > i1.taustart_ts
       ) t0
       LEFT OUTER JOIN (SELECT a.runcat
                          FROM extractedsource x
                              ,assocxtrsource a
                         WHERE x.image = %(image_id)s
                           AND a.xtrsrc = x.id
                       ) t1
       ON t0.id = t1.runcat
 WHERE t1.runcat IS NULL
"""
    qry_params = {'image_id': image_id}
    cursor = execute(query, qry_params)
    res = cursor.fetchall()
    return res
def _update_runcat_flux():
    """We only have to update those runcat sources that already had a
    detection, so their f_datapoints is larger than 1.
    """
    query = """\
UPDATE runningcatalog_flux
   SET f_datapoints = (SELECT f_datapoints FROM temprunningcatalog
                        WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                          AND temprunningcatalog.band = runningcatalog_flux.band
                          AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                          AND temprunningcatalog.inactive = FALSE
                          AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_peak = (SELECT avg_f_peak FROM temprunningcatalog
                      WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                        AND temprunningcatalog.band = runningcatalog_flux.band
                        AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                        AND temprunningcatalog.inactive = FALSE
                        AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_peak_sq = (SELECT avg_f_peak_sq FROM temprunningcatalog
                         WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                           AND temprunningcatalog.band = runningcatalog_flux.band
                           AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                           AND temprunningcatalog.inactive = FALSE
                           AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_peak_weight = (SELECT avg_f_peak_weight FROM temprunningcatalog
                             WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                               AND temprunningcatalog.band = runningcatalog_flux.band
                               AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                               AND temprunningcatalog.inactive = FALSE
                               AND temprunningcatalog.f_datapoints > 1)
      ,avg_weighted_f_peak = (SELECT avg_weighted_f_peak FROM temprunningcatalog
                               WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                                 AND temprunningcatalog.band = runningcatalog_flux.band
                                 AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                                 AND temprunningcatalog.inactive = FALSE
                                 AND temprunningcatalog.f_datapoints > 1)
      ,avg_weighted_f_peak_sq = (SELECT avg_weighted_f_peak_sq FROM temprunningcatalog
                                  WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                                    AND temprunningcatalog.band = runningcatalog_flux.band
                                    AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                                    AND temprunningcatalog.inactive = FALSE
                                    AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_int = (SELECT avg_f_int FROM temprunningcatalog
                     WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                       AND temprunningcatalog.band = runningcatalog_flux.band
                       AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                       AND temprunningcatalog.inactive = FALSE
                       AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_int_sq = (SELECT avg_f_int_sq FROM temprunningcatalog
                        WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                          AND temprunningcatalog.band = runningcatalog_flux.band
                          AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                          AND temprunningcatalog.inactive = FALSE
                          AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_int_weight = (SELECT avg_f_int_weight FROM temprunningcatalog
                            WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                              AND temprunningcatalog.band = runningcatalog_flux.band
                              AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                              AND temprunningcatalog.inactive = FALSE
                              AND temprunningcatalog.f_datapoints > 1)
      ,avg_weighted_f_int = (SELECT avg_weighted_f_int FROM temprunningcatalog
                              WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                                AND temprunningcatalog.band = runningcatalog_flux.band
                                AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                                AND temprunningcatalog.inactive = FALSE
                                AND temprunningcatalog.f_datapoints > 1)
      ,avg_weighted_f_int_sq = (SELECT avg_weighted_f_int_sq FROM temprunningcatalog
                                 WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                                   AND temprunningcatalog.band = runningcatalog_flux.band
                                   AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                                   AND temprunningcatalog.inactive = FALSE
                                   AND temprunningcatalog.f_datapoints > 1)
 WHERE EXISTS (SELECT runcat FROM temprunningcatalog
                WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                  AND temprunningcatalog.band = runningcatalog_flux.band
                  AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                  AND temprunningcatalog.inactive = FALSE
                  AND temprunningcatalog.f_datapoints > 1)
"""
    cursor = execute(query, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.info("Updated flux for %s null_detections" % cnt)
def _insert_tempruncat(image_id):
    """
    Here the associations of forced fits and their runningcatalog
    counterparts are inserted into the temporary table.

    We follow the implementation of the normal association procedure,
    except that we don't need to match with a De Ruiter radius, since
    the counterpart pairs are from the same runningcatalog source.
    """
    query = """\
INSERT INTO temprunningcatalog
  (runcat, xtrsrc, distance_arcsec, r, dataset, band, stokes, datapoints
  ,zone, wm_ra, wm_decl, wm_uncertainty_ew, wm_uncertainty_ns
  ,avg_ra_err, avg_decl_err, avg_wra, avg_wdecl
  ,avg_weight_ra, avg_weight_decl, x, y, z
  ,f_datapoints, avg_f_peak, avg_f_peak_sq, avg_f_peak_weight
  ,avg_weighted_f_peak, avg_weighted_f_peak_sq
  ,avg_f_int, avg_f_int_sq, avg_f_int_weight
  ,avg_weighted_f_int, avg_weighted_f_int_sq
  )
  SELECT t0.runcat
        ,t0.xtrsrc
        ,0 AS distance_arcsec
        ,0 AS r
        ,t0.dataset, t0.band, t0.stokes, t0.datapoints
        ,t0.zone, t0.wm_ra, t0.wm_decl
        ,t0.wm_uncertainty_ew, t0.wm_uncertainty_ns
        ,t0.avg_ra_err, t0.avg_decl_err
        ,t0.avg_wra, t0.avg_wdecl
        ,t0.avg_weight_ra, t0.avg_weight_decl
        ,t0.x, t0.y, t0.z
        ,CASE WHEN rf.f_datapoints IS NULL THEN 1
              ELSE rf.f_datapoints + 1
         END AS f_datapoints
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak + t0.f_peak)
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_peak * t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak_sq
                    + t0.f_peak * t0.f_peak) / (rf.f_datapoints + 1)
         END AS avg_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_f_peak_weight
                    + 1 / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak
                    + t0.f_peak / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak * t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak_sq
                    + (t0.f_peak * t0.f_peak)
                      / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int + t0.f_int)
                   / (rf.f_datapoints + 1)
         END AS avg_f_int
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_int * t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int_sq
                    + t0.f_int * t0.f_int) / (rf.f_datapoints + 1)
         END AS avg_f_int_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_f_int_weight
                    + 1 / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_int_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int
                    + t0.f_int / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int * t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int_sq
                    + (t0.f_int * t0.f_int)
                      / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int_sq
    FROM (SELECT r.id AS runcat
                ,x.id AS xtrsrc
                ,x.f_peak, x.f_peak_err, x.f_int, x.f_int_err
                ,i.dataset, i.band, i.stokes
                ,r.datapoints
                ,r.zone, r.wm_ra, r.wm_decl
                ,r.wm_uncertainty_ew, r.wm_uncertainty_ns
                ,r.avg_ra_err, r.avg_decl_err
                ,r.avg_wra, r.avg_wdecl
                ,r.avg_weight_ra, r.avg_weight_decl
                ,r.x, r.y, r.z
            FROM runningcatalog r
                ,image i
                ,extractedsource x
           WHERE i.id = %(image_id)s
             AND r.dataset = i.dataset
             AND x.image = i.id
             AND x.image = %(image_id)s
             AND x.extract_type = 1
             AND r.mon_src = FALSE
             AND r.zone BETWEEN CAST(FLOOR(x.decl - x.error_radius/3600) AS INTEGER)
                            AND CAST(FLOOR(x.decl + x.error_radius/3600) AS INTEGER)
             AND r.wm_decl BETWEEN x.decl - x.error_radius/3600
                               AND x.decl + x.error_radius/3600
             AND r.wm_ra BETWEEN x.ra - alpha(x.error_radius/3600, x.decl)
                             AND x.ra + alpha(x.error_radius/3600, x.decl)
             AND r.x * x.x + r.y * x.y + r.z * x.z > COS(RADIANS(r.wm_uncertainty_ew))
         ) t0
         LEFT OUTER JOIN runningcatalog_flux rf
         ON t0.runcat = rf.runcat
            AND t0.band = rf.band
            AND t0.stokes = rf.stokes
"""
    qry_params = {'image_id': image_id}
    cursor = execute(query, qry_params, commit=True)
    cnt = cursor.rowcount
    logger.info("Inserted %s null detections in tempruncat" % cnt)
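# Each CASE expression above implements the same incremental update: if no
# flux record exists for this band yet (rf.f_datapoints IS NULL) the new
# sample seeds the average, otherwise the running mean over n samples is
# extended by one. The equivalent arithmetic in plain Python (a sketch):
def extend_running_mean(old_avg, n, sample):
    """Return the updated (average, count); n is None when there is no
    existing flux record, mirroring rf.f_datapoints IS NULL."""
    if n is None:
        return sample, 1
    return (n * old_avg + sample) / (n + 1), n + 1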
def get_nulldetections(image_id, expiration=10):
    """
    Returns the runningcatalog sources which:

      * Are associated with the skyregion of the current image.
      * Do not have a counterpart in the extractedsources of the current
        image after source association has run.
      * Have been seen (in any band) at a timestamp earlier than that of
        the current image.

    NB This runs *after* the source association.

    We determine null detections only as those sources which have been seen
    at earlier times which don't appear in the current image. Sources which
    have not been seen earlier, and which appear in different bands at the
    current timestep, are *not* null detections, but are considered as
    "new" sources.

    Args:
        image_id (int): database ID of image
        expiration (int): number of forced fits performed after a blind fit

    Returns:
        list of tuples [(runcatid, ra, decl)]
    """
    # The first temptable t0 looks for runcat sources that have been seen
    # in the same sky region as the current image,
    # but at an earlier timestamp, irrespective of the band.
    # The second temptable t1 returns the runcat source ids for those
    # sources that have an association with the current extracted sources.
    # The left outer join in combination with the t1.runcat IS NULL then
    # returns the runcat sources that could not be associated.
    query = """\
SELECT t0.id
      ,t0.wm_ra
      ,t0.wm_decl
  FROM (SELECT r0.id
              ,r0.wm_ra
              ,r0.wm_decl
          FROM image i0
              ,assocskyrgn a0
              ,runningcatalog r0
              ,extractedsource x0
              ,image i1
         WHERE i0.id = %(image_id)s
           AND a0.skyrgn = i0.skyrgn
           AND r0.id = a0.runcat
           AND r0.forcedfits_count < %(expiration)s
           AND x0.id = r0.xtrsrc
           AND i1.id = x0.image
           AND i0.taustart_ts > i1.taustart_ts
       ) t0
       LEFT OUTER JOIN (SELECT a.runcat
                          FROM extractedsource x
                              ,assocxtrsource a
                         WHERE x.image = %(image_id)s
                           AND a.xtrsrc = x.id
                       ) t1
       ON t0.id = t1.runcat
 WHERE t1.runcat IS NULL
"""
    qry_params = {'image_id': image_id, 'expiration': expiration}
    cursor = execute(query, qry_params)
    res = cursor.fetchall()
    return res
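# Sketch of how the result is typically consumed: each null detection is
# force-fitted at its weighted mean position, and the per-source counter is
# then bumped so a source stops being fitted once it expires
# (forced_fit_positions is a placeholder name, not part of this module):
nulls = get_nulldetections(image_id, expiration=10)
positions = [(ra, decl) for runcatid, ra, decl in nulls]
forced_fit_positions(positions, image_id)
_increment_forcedfits_count()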
def _insert_tempruncat(image_id):
    """
    Here the associations of forced fits and their runningcatalog
    counterparts are inserted into the temporary table.

    We follow the normal association procedure by analogy. The difference
    here is that we already know the runcat ids for the
    extractedsource.extract_type = 1 (ff_nd) sources, since these were
    inserted at the same time. This is why subtable t0 looks simpler than
    in the normal case. We still have to do a left outer join with the
    runcat_flux table (rf), because fluxes might not have been detected in
    other bands. The additional statistical properties are calculated
    before insertion.
    """
    query = """\
INSERT INTO temprunningcatalog
  (runcat, xtrsrc, distance_arcsec, r, dataset, band, stokes, datapoints
  ,zone, wm_ra, wm_decl, wm_uncertainty_ew, wm_uncertainty_ns
  ,avg_ra_err, avg_decl_err, avg_wra, avg_wdecl
  ,avg_weight_ra, avg_weight_decl, x, y, z
  ,f_datapoints, avg_f_peak, avg_f_peak_sq, avg_f_peak_weight
  ,avg_weighted_f_peak, avg_weighted_f_peak_sq
  ,avg_f_int, avg_f_int_sq, avg_f_int_weight
  ,avg_weighted_f_int, avg_weighted_f_int_sq
  )
  SELECT t0.runcat
        ,t0.xtrsrc
        ,0 AS distance_arcsec
        ,0 AS r
        ,t0.dataset, t0.band, t0.stokes, t0.datapoints
        ,t0.zone, t0.wm_ra, t0.wm_decl
        ,t0.wm_uncertainty_ew, t0.wm_uncertainty_ns
        ,t0.avg_ra_err, t0.avg_decl_err
        ,t0.avg_wra, t0.avg_wdecl
        ,t0.avg_weight_ra, t0.avg_weight_decl
        ,t0.x, t0.y, t0.z
        ,CASE WHEN rf.f_datapoints IS NULL THEN 1
              ELSE rf.f_datapoints + 1
         END AS f_datapoints
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak + t0.f_peak)
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_peak * t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak_sq
                    + t0.f_peak * t0.f_peak) / (rf.f_datapoints + 1)
         END AS avg_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_f_peak_weight
                    + 1 / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak
                    + t0.f_peak / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak * t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak_sq
                    + (t0.f_peak * t0.f_peak)
                      / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int + t0.f_int)
                   / (rf.f_datapoints + 1)
         END AS avg_f_int
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_int * t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int_sq
                    + t0.f_int * t0.f_int) / (rf.f_datapoints + 1)
         END AS avg_f_int_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_f_int_weight
                    + 1 / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_int_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int
                    + t0.f_int / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int * t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int_sq
                    + (t0.f_int * t0.f_int)
                      / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int_sq
    FROM (SELECT r.id AS runcat
                ,x.id AS xtrsrc
                ,x.f_peak, x.f_peak_err, x.f_int, x.f_int_err
                ,i.dataset, i.band, i.stokes
                ,r.datapoints
                ,r.zone, r.wm_ra, r.wm_decl
                ,r.wm_uncertainty_ew, r.wm_uncertainty_ns
                ,r.avg_ra_err, r.avg_decl_err
                ,r.avg_wra, r.avg_wdecl
                ,r.avg_weight_ra, r.avg_weight_decl
                ,r.x, r.y, r.z
            FROM extractedsource x
                ,image i
                ,runningcatalog r
           WHERE x.image = %(image_id)s
             AND x.extract_type = 1
             AND i.id = x.image
             AND r.id = x.ff_runcat
             AND r.mon_src = FALSE
         ) t0
         LEFT OUTER JOIN runningcatalog_flux rf
         ON t0.runcat = rf.runcat
            AND t0.band = rf.band
            AND t0.stokes = rf.stokes
"""
    qry_params = {'image_id': image_id}
    cursor = execute(query, qry_params, commit=True)
    cnt = cursor.rowcount
    logger.debug("Inserted %s null detections in tempruncat" % cnt)
def _insert_new_runcat_flux(image_id):
    """Insert the fitted fluxes of the monitoring sources as new datapoints
    into runningcatalog_flux.

    Extracted sources for which no counterpart was found in the
    runningcatalog, i.e. those that do not have an entry in the tempruncat
    table (t0), will be added as new sources to the runningcatalog_flux
    table.
    """
    query = """\
INSERT INTO runningcatalog_flux
  (runcat, band, stokes, f_datapoints
  ,avg_f_peak, avg_f_peak_sq, avg_f_peak_weight
  ,avg_weighted_f_peak, avg_weighted_f_peak_sq
  ,avg_f_int, avg_f_int_sq, avg_f_int_weight
  ,avg_weighted_f_int, avg_weighted_f_int_sq
  )
  SELECT r0.id
        ,i0.band
        ,i0.stokes
        ,1 AS f_datapoints
        ,x0.f_peak
        ,x0.f_peak * x0.f_peak
        ,1 / (x0.f_peak_err * x0.f_peak_err)
        ,x0.f_peak / (x0.f_peak_err * x0.f_peak_err)
        ,x0.f_peak * x0.f_peak / (x0.f_peak_err * x0.f_peak_err)
        ,x0.f_int
        ,x0.f_int * x0.f_int
        ,1 / (x0.f_int_err * x0.f_int_err)
        ,x0.f_int / (x0.f_int_err * x0.f_int_err)
        ,x0.f_int * x0.f_int / (x0.f_int_err * x0.f_int_err)
    FROM image i0
        ,(SELECT x1.id AS xtrsrc
            FROM extractedsource x1
                 LEFT OUTER JOIN temprunningcatalog trc1
                 ON x1.id = trc1.xtrsrc
           WHERE x1.image = %(image_id)s
             AND x1.extract_type = 2
             AND trc1.xtrsrc IS NULL
         ) t0
        ,runningcatalog r0
        ,extractedsource x0
   WHERE i0.id = %(image_id)s
     AND r0.xtrsrc = t0.xtrsrc
     AND x0.id = r0.xtrsrc
     AND x0.extract_type = 2
"""
    cursor = execute(query, {'image_id': image_id}, commit=True)
    ins = cursor.rowcount
    if ins > 0:
        logger.info("Added %s new monitoring fluxes to runningcatalog_flux"
                    % ins)
def _update_runcat():
    """Update the running catalog (positional) properties of the monitoring
    sources with the values in temprunningcatalog"""
    query = """\
UPDATE runningcatalog
   SET datapoints = (SELECT datapoints FROM temprunningcatalog
                      WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,zone = (SELECT zone FROM temprunningcatalog
                WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,wm_ra = (SELECT wm_ra FROM temprunningcatalog
                 WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,wm_decl = (SELECT wm_decl FROM temprunningcatalog
                   WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,avg_ra_err = (SELECT avg_ra_err FROM temprunningcatalog
                      WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,avg_decl_err = (SELECT avg_decl_err FROM temprunningcatalog
                        WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,wm_uncertainty_ew = (SELECT wm_uncertainty_ew FROM temprunningcatalog
                             WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,wm_uncertainty_ns = (SELECT wm_uncertainty_ns FROM temprunningcatalog
                             WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,avg_wra = (SELECT avg_wra FROM temprunningcatalog
                   WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,avg_wdecl = (SELECT avg_wdecl FROM temprunningcatalog
                     WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,avg_weight_ra = (SELECT avg_weight_ra FROM temprunningcatalog
                         WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,avg_weight_decl = (SELECT avg_weight_decl FROM temprunningcatalog
                           WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,x = (SELECT x FROM temprunningcatalog
             WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,y = (SELECT y FROM temprunningcatalog
             WHERE temprunningcatalog.runcat = runningcatalog.id)
      ,z = (SELECT z FROM temprunningcatalog
             WHERE temprunningcatalog.runcat = runningcatalog.id)
 WHERE EXISTS (SELECT runcat FROM temprunningcatalog
                WHERE temprunningcatalog.runcat = runningcatalog.id)
"""
    cursor = execute(query, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.info("Updated %s monitoring sources in runcat" % cnt)
def dataset_images(dataset_id, database=None):
    # NB the LIMIT 1 means at most one image id is returned,
    # despite the plural name
    q = "SELECT id FROM image WHERE dataset=%(dataset)s LIMIT 1"
    args = {'dataset': dataset_id}
    cursor = execute(q, args)
    image_ids = [x[0] for x in cursor.fetchall()]
    return image_ids
def _update_runcat_flux():
    """We only have to update those runcat fluxes that already had a fit,
    so their f_datapoints is larger than 1. The ones that had a fit in
    another band, but not in this one, are handled by
    _insert_runcat_flux().
    """
    query = """\
UPDATE runningcatalog_flux
   SET f_datapoints = (SELECT f_datapoints FROM temprunningcatalog
                        WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                          AND temprunningcatalog.band = runningcatalog_flux.band
                          AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                          AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_peak = (SELECT avg_f_peak FROM temprunningcatalog
                      WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                        AND temprunningcatalog.band = runningcatalog_flux.band
                        AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                        AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_peak_sq = (SELECT avg_f_peak_sq FROM temprunningcatalog
                         WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                           AND temprunningcatalog.band = runningcatalog_flux.band
                           AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                           AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_peak_weight = (SELECT avg_f_peak_weight FROM temprunningcatalog
                             WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                               AND temprunningcatalog.band = runningcatalog_flux.band
                               AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                               AND temprunningcatalog.f_datapoints > 1)
      ,avg_weighted_f_peak = (SELECT avg_weighted_f_peak FROM temprunningcatalog
                               WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                                 AND temprunningcatalog.band = runningcatalog_flux.band
                                 AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                                 AND temprunningcatalog.f_datapoints > 1)
      ,avg_weighted_f_peak_sq = (SELECT avg_weighted_f_peak_sq FROM temprunningcatalog
                                  WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                                    AND temprunningcatalog.band = runningcatalog_flux.band
                                    AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                                    AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_int = (SELECT avg_f_int FROM temprunningcatalog
                     WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                       AND temprunningcatalog.band = runningcatalog_flux.band
                       AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                       AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_int_sq = (SELECT avg_f_int_sq FROM temprunningcatalog
                        WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                          AND temprunningcatalog.band = runningcatalog_flux.band
                          AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                          AND temprunningcatalog.f_datapoints > 1)
      ,avg_f_int_weight = (SELECT avg_f_int_weight FROM temprunningcatalog
                            WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                              AND temprunningcatalog.band = runningcatalog_flux.band
                              AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                              AND temprunningcatalog.f_datapoints > 1)
      ,avg_weighted_f_int = (SELECT avg_weighted_f_int FROM temprunningcatalog
                              WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                                AND temprunningcatalog.band = runningcatalog_flux.band
                                AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                                AND temprunningcatalog.f_datapoints > 1)
      ,avg_weighted_f_int_sq = (SELECT avg_weighted_f_int_sq FROM temprunningcatalog
                                 WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                                   AND temprunningcatalog.band = runningcatalog_flux.band
                                   AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                                   AND temprunningcatalog.f_datapoints > 1)
 WHERE EXISTS (SELECT runcat FROM temprunningcatalog
                WHERE temprunningcatalog.runcat = runningcatalog_flux.runcat
                  AND temprunningcatalog.band = runningcatalog_flux.band
                  AND temprunningcatalog.stokes = runningcatalog_flux.stokes
                  AND temprunningcatalog.f_datapoints > 1)
"""
    cursor = execute(query, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.info("Updated fluxes for %s monitoring sources "
                    "in runcat_flux" % cnt)
def _insert_tempruncat(image_id):
    """
    Here the associations of forced fits of the monitoring sources and
    their runningcatalog counterparts are inserted into the temporary
    table.

    We follow the implementation of the normal association procedure,
    except that we don't need to match with a De Ruiter radius, since
    the counterpart pairs are from the same runningcatalog source.
    """
    # The query is as follows:
    # t0 searches for matches between the monitoring sources
    # (extract_type = 2) in the current image that have
    # a counterpart among the runningcatalog sources. This
    # matching is done by zone, decl, ra (corrected for alpha
    # inflation towards the poles) and the dot product by
    # using the Cartesian coordinates. Note that the conical
    # distance is not determined by the De Ruiter radius,
    # since all these sources have identical positions.
    # t0 has a left outer join with the runningcatalog_flux table,
    # since the image might be of a new frequency band. In that case
    # all the rf values are NULL.
    # The select then determines all the new (statistical) properties
    # for the runcat-monitoring pairs, which are inserted in the
    # tempruncat table.
    # Note that a first image does not have any matches,
    # but that is taken into account by the second part of
    # the associate_ms() function.
    query = """\
INSERT INTO temprunningcatalog
  (runcat, xtrsrc, distance_arcsec, r, dataset, band, stokes, datapoints
  ,zone, wm_ra, wm_decl, wm_uncertainty_ew, wm_uncertainty_ns
  ,avg_ra_err, avg_decl_err, avg_wra, avg_wdecl
  ,avg_weight_ra, avg_weight_decl, x, y, z
  ,f_datapoints, avg_f_peak, avg_f_peak_sq, avg_f_peak_weight
  ,avg_weighted_f_peak, avg_weighted_f_peak_sq
  ,avg_f_int, avg_f_int_sq, avg_f_int_weight
  ,avg_weighted_f_int, avg_weighted_f_int_sq
  )
  SELECT t0.runcat
        ,t0.xtrsrc
        ,0 AS distance_arcsec
        ,0 AS r
        ,t0.dataset, t0.band, t0.stokes, t0.datapoints
        ,t0.zone, t0.wm_ra, t0.wm_decl
        ,t0.wm_uncertainty_ew, t0.wm_uncertainty_ns
        ,t0.avg_ra_err, t0.avg_decl_err
        ,t0.avg_wra, t0.avg_wdecl
        ,t0.avg_weight_ra, t0.avg_weight_decl
        ,t0.x, t0.y, t0.z
        ,CASE WHEN rf.f_datapoints IS NULL THEN 1
              ELSE rf.f_datapoints + 1
         END AS f_datapoints
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak + t0.f_peak)
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_peak * t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak_sq
                    + t0.f_peak * t0.f_peak) / (rf.f_datapoints + 1)
         END AS avg_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_f_peak_weight
                    + 1 / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak
                    + t0.f_peak / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak * t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak_sq
                    + (t0.f_peak * t0.f_peak)
                      / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int + t0.f_int)
                   / (rf.f_datapoints + 1)
         END AS avg_f_int
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_int * t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int_sq
                    + t0.f_int * t0.f_int) / (rf.f_datapoints + 1)
         END AS avg_f_int_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_f_int_weight
                    + 1 / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_int_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int
                    + t0.f_int / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int * t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int_sq
                    + (t0.f_int * t0.f_int)
                      / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int_sq
    FROM (SELECT r.id AS runcat
                ,x.id AS xtrsrc
                ,x.f_peak, x.f_peak_err, x.f_int, x.f_int_err
                ,i.dataset, i.band, i.stokes
                ,r.datapoints + 1 AS datapoints
                ,r.zone, r.wm_ra, r.wm_decl
                ,r.wm_uncertainty_ew, r.wm_uncertainty_ns
                ,r.avg_ra_err, r.avg_decl_err
                ,r.avg_wra, r.avg_wdecl
                ,r.avg_weight_ra, r.avg_weight_decl
                ,r.x, r.y, r.z
            FROM runningcatalog r
                ,image i
                ,extractedsource x
           WHERE i.id = %(image_id)s
             AND r.dataset = i.dataset
             AND x.image = i.id
             AND x.image = %(image_id)s
             AND x.extract_type = 2
             AND r.mon_src = TRUE
             AND r.zone BETWEEN CAST(FLOOR(x.decl - x.error_radius/3600) AS INTEGER)
                            AND CAST(FLOOR(x.decl + x.error_radius/3600) AS INTEGER)
             AND r.wm_decl BETWEEN x.decl - x.error_radius/3600
                               AND x.decl + x.error_radius/3600
             AND r.wm_ra BETWEEN x.ra - alpha(x.error_radius/3600, x.decl)
                             AND x.ra + alpha(x.error_radius/3600, x.decl)
             AND r.x * x.x + r.y * x.y + r.z * x.z > COS(RADIANS(r.wm_uncertainty_ew))
         ) t0
         LEFT OUTER JOIN runningcatalog_flux rf
         ON t0.runcat = rf.runcat
            AND t0.band = rf.band
            AND t0.stokes = rf.stokes
"""
    qry_params = {'image_id': image_id}
    cursor = execute(query, qry_params, commit=True)
    cnt = cursor.rowcount
    if cnt > 0:
        logger.info("Inserted %s monitoring-runcat candidate pairs "
                    "in tempruncat" % cnt)
def _insert_tempruncat(image_id):
    """
    Here the associations of forced fits of the monitoring sources and
    their runningcatalog counterparts are inserted into the temporary
    table.

    We follow the implementation of the normal association procedure,
    except that we don't need to match with a De Ruiter radius, since
    the counterpart pairs are from the same runningcatalog source.
    """
    # The query is as follows:
    # t0 searches for matches between the monitoring sources
    # (extract_type = 2) in the current image that have
    # a counterpart among the runningcatalog sources. This
    # matching is done by zone, decl, ra (corrected for alpha
    # inflation towards the poles) and the dot product by
    # using the Cartesian coordinates. Note that the conical
    # distance is not determined by the De Ruiter radius,
    # since all these sources have identical positions.
    # t0 has a left outer join with the runningcatalog_flux table,
    # since the image might be of a new frequency band. In that case
    # all the rf values are NULL.
    # The select then determines all the new (statistical) properties
    # for the runcat-monitoring pairs, which are inserted in the
    # tempruncat table.
    # Note that a first image does not have any matches,
    # but that is taken into account by the second part of
    # the associate_ms() function.
    query = """\
INSERT INTO temprunningcatalog
  (runcat, xtrsrc, distance_arcsec, r, dataset, band, stokes, datapoints
  ,zone, wm_ra, wm_decl, wm_uncertainty_ew, wm_uncertainty_ns
  ,avg_ra_err, avg_decl_err, avg_wra, avg_wdecl
  ,avg_weight_ra, avg_weight_decl, x, y, z
  ,f_datapoints, avg_f_peak, avg_f_peak_sq, avg_f_peak_weight
  ,avg_weighted_f_peak, avg_weighted_f_peak_sq
  ,avg_f_int, avg_f_int_sq, avg_f_int_weight
  ,avg_weighted_f_int, avg_weighted_f_int_sq
  )
  SELECT t0.runcat_id
        ,t0.xtrsrc
        ,0 AS distance_arcsec
        ,0 AS r
        ,t0.dataset, t0.band, t0.stokes, t0.datapoints
        ,t0.zone, t0.wm_ra, t0.wm_decl
        ,t0.wm_uncertainty_ew, t0.wm_uncertainty_ns
        ,t0.avg_ra_err, t0.avg_decl_err
        ,t0.avg_wra, t0.avg_wdecl
        ,t0.avg_weight_ra, t0.avg_weight_decl
        ,t0.x, t0.y, t0.z
        ,CASE WHEN rf.f_datapoints IS NULL THEN 1
              ELSE rf.f_datapoints + 1
         END AS f_datapoints
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak + t0.f_peak)
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_peak * t0.f_peak
              ELSE (rf.f_datapoints * rf.avg_f_peak_sq
                    + t0.f_peak * t0.f_peak) / (rf.f_datapoints + 1)
         END AS avg_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_f_peak_weight
                    + 1 / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_peak_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak
                    + t0.f_peak / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_peak * t0.f_peak / (t0.f_peak_err * t0.f_peak_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_peak_sq
                    + (t0.f_peak * t0.f_peak)
                      / (t0.f_peak_err * t0.f_peak_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_peak_sq
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int + t0.f_int)
                   / (rf.f_datapoints + 1)
         END AS avg_f_int
        ,CASE WHEN rf.f_datapoints IS NULL THEN t0.f_int * t0.f_int
              ELSE (rf.f_datapoints * rf.avg_f_int_sq
                    + t0.f_int * t0.f_int) / (rf.f_datapoints + 1)
         END AS avg_f_int_sq
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN 1 / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_f_int_weight
                    + 1 / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_f_int_weight
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int
                    + t0.f_int / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int
        ,CASE WHEN rf.f_datapoints IS NULL
              THEN t0.f_int * t0.f_int / (t0.f_int_err * t0.f_int_err)
              ELSE (rf.f_datapoints * rf.avg_weighted_f_int_sq
                    + (t0.f_int * t0.f_int)
                      / (t0.f_int_err * t0.f_int_err))
                   / (rf.f_datapoints + 1)
         END AS avg_weighted_f_int_sq
    FROM (SELECT mon.runcat AS runcat_id
                ,x.id AS xtrsrc
                ,x.f_peak, x.f_peak_err, x.f_int, x.f_int_err
                ,i.dataset, i.band, i.stokes
                ,r.datapoints + 1 AS datapoints
                ,r.zone, r.wm_ra, r.wm_decl
                ,r.wm_uncertainty_ew, r.wm_uncertainty_ns
                ,r.avg_ra_err, r.avg_decl_err
                ,r.avg_wra, r.avg_wdecl
                ,r.avg_weight_ra, r.avg_weight_decl
                ,r.x, r.y, r.z
            FROM monitor mon
            JOIN extractedsource x ON mon.id = x.ff_monitor
            JOIN runningcatalog r ON mon.runcat = r.id
            JOIN image i ON x.image = i.id
           WHERE mon.runcat IS NOT NULL
             AND x.image = %(image_id)s
             AND x.extract_type = 2
         ) t0
         LEFT OUTER JOIN runningcatalog_flux rf
         ON t0.runcat_id = rf.runcat
            AND t0.band = rf.band
            AND t0.stokes = rf.stokes
"""
    qry_params = {'image_id': image_id}
    cursor = execute(query, qry_params, commit=True)
    cnt = cursor.rowcount
    logger.debug("Inserted %s monitoring-runcat pairs in tempruncat" % cnt)