def add_manual_monitoringlist_entries(dataset_id, inputs):
    """Parses coordinates from 'inputs', loads them into the monitoringlist."""
    monitor_coords = []
    if "monitor_coords" in inputs:
        try:
            monitor_coords.extend(json.loads(inputs["monitor_coords"]))
        except ValueError:
            logger.error("Could not parse monitor-coords from command line")
            return False

    if "monitor_list" in inputs:
        try:
            mon_list = json.load(open(inputs["monitor_list"]))
            monitor_coords.extend(mon_list)
        except ValueError:
            logger.error("Could not parse monitor-coords from file: " +
                         inputs["monitor_list"])
            return False

    if monitor_coords:
        logger.info("You specified monitoring at coords:")
        for i in monitor_coords:
            logger.info("RA, %f ; Dec, %f " % (i[0], i[1]))
    for c in monitor_coords:
        dataset = DataSet(id=dataset_id, database=Database())
        dataset.add_manual_entry_to_monitoringlist(c[0], c[1])
    return True
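# A minimal usage sketch for the function above (hypothetical values):
# 'monitor_coords' must be a JSON string of [RA, Dec] pairs in decimal
# degrees, matching the json.loads() call. Dataset id 1 is assumed to
# already exist in the database.
def _example_add_manual_entries():
    example_inputs = {"monitor_coords": "[[123.4, 45.6], [227.1, -10.3]]"}
    return add_manual_monitoringlist_entries(dataset_id=1,
                                             inputs=example_inputs)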
def test_update(self):
    """Update all or individual dataset columns"""
    dataset1 = DataSet(data={'description': 'dataset 1'})
    self.assertEqual(dataset1.description, "dataset 1")
    dataset1.update(rerun=5, description="new dataset")
    self.database.cursor.execute(
        "SELECT rerun, description FROM dataset WHERE id=%s",
        (dataset1.id,))
    results = self.database.cursor.fetchone()
    self.assertEqual(results[0], 5)
    self.assertEqual(results[1], "new dataset")
    self.assertEqual(dataset1.description, "new dataset")
    self.assertEqual(dataset1.rerun, 5)
def test_create(self):
    image_data_dicts = db_subs.generate_timespaced_dbimages_data(n_images=2)
    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    #self.assertEqual(dataset1.images, set())
    image1 = Image(dataset=dataset1, data=image_data_dicts[0])
    # Images are automatically added to their dataset
    #self.assertEqual(dataset1.images, set([image1]))
    self.assertEqual(image1.tau_time, image_data_dicts[0]['tau_time'])
    self.assertAlmostEqual(image1.freq_eff, image_data_dicts[0]['freq_eff'])
    image2 = Image(dataset=dataset1, data=image_data_dicts[1])
    #self.assertEqual(dataset1.images, set([image1, image2]))
    dataset2 = DataSet(database=self.database, id=dataset1.id)
def store_images(images_metadata, extraction_radius_pix, dataset_id):
    """
    Add images to database. Note that all images in one dataset should be
    inserted in one go, since the order is very important here. If you don't
    add them all at once, you should make sure they are added in the correct
    order, e.g. sorted by observation time.

    Note: Should only be used in a master recipe

    Args:
        images_metadata: list of dicts containing image metadata
        extraction_radius_pix: (float) used to calculate the 'skyregion'
        dataset_id: dataset id to be used. Don't use the value from the
            parset file, since this can be -1 (the TraP way of flagging
            auto increment)

    Returns:
        the database IDs of the stored images
    """
    database = Database()
    dataset = DataSet(id=dataset_id, database=database)
    image_ids = []

    # sort images by timestamp
    images_metadata.sort(key=lambda m: m['taustart_ts'])

    for metadata in images_metadata:
        metadata['xtr_radius'] = extraction_radius_pix * abs(metadata['deltax'])
        filename = metadata['url']
        db_image = Image(data=metadata, dataset=dataset)
        image_ids.append(db_image.id)
        logger.info("stored %s with ID %s" %
                    (os.path.basename(filename), db_image.id))
    return image_ids
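# A hedged usage sketch for store_images(). The metadata keys below are
# copied from the image data dicts used in the tests elsewhere in this
# section; the exact required key set is an assumption of this sketch, and
# the path and radius values are hypothetical.
def _example_store_images():
    example_metadata = [{'taustart_ts': datetime.datetime(2010, 3, 3),
                         'tau_time': 3600,
                         'url': '/data/image_0.fits',  # hypothetical path
                         'freq_eff': 80e6,
                         'freq_bw': 1e6,
                         'beam_smaj_pix': 2.7,
                         'beam_smin_pix': 2.3,
                         'beam_pa_rad': 1.7,
                         'deltax': -0.01111,
                         'deltay': 0.01111,
                         'centre_ra': 111,
                         'centre_decl': 11}]
    # 'xtr_radius' is filled in by store_images() itself.
    return store_images(example_metadata,
                        extraction_radius_pix=270.,  # hypothetical value
                        dataset_id=1)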
def test_infinite(self):
    # Check that database insertion doesn't choke on infinite beam
    # parameters.
    test_specific_image_data = {
        'beam_smaj_pix': float('inf'),
        'beam_smin_pix': float('inf'),
        'beam_pa_rad': float('inf'),
    }
    image_data_dicts = db_subs.generate_timespaced_dbimages_data(
        n_images=1, **test_specific_image_data)
    image_data = image_data_dicts[0]
    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    image1 = Image(dataset=dataset1, data=image_data)

    bmaj, bmin, bpa = tkp.db.execute(
        """
        SELECT rb_smaj, rb_smin, rb_pa
        FROM image
        WHERE image.id = %(id)s
        """, {"id": image1.id}).fetchone()
    self.assertEqual(bmaj, float('inf'))
    self.assertEqual(bmin, float('inf'))
    self.assertEqual(bpa, float('inf'))
def test_create(self):
    """Create a new dataset, and retrieve it"""
    dataset1 = DataSet(data={'description': 'dataset 1'})
    # The description of the following dataset will be ignored, and set
    # to that of the existing dataset with the given id
    dataset2 = DataSet(id=dataset1.id)
    dataset2.update()
    self.assertEqual(dataset2.description, "dataset 1")
    self.assertEqual(dataset2.id, dataset1.id)
    # 'data' is ignored if an id is given:
    dataset3 = DataSet(data={'description': 'dataset 3'}, id=dataset1.id)
    self.assertEqual(dataset3.description, "dataset 1")
    self.assertEqual(dataset3.id, dataset1.id)
def test_max_bandwidth(self):
    """
    Test if setting max bandwidth correctly affects the band of images
    """
    data = copy(self.image_data)
    dataset_data = {'description': self._testMethodName}
    dataset = DataSet(data=dataset_data, database=self.database)

    data['freq_eff'] = 50e6      # 50 MHz
    data['freq_bw'] = 2e6        # 2 MHz
    data['freq_bw_max'] = 0.0    # no band association limiting
    first_image = Image(dataset=dataset, data=data)

    # this image should be assigned the same band, since within bandwidth
    data['freq_eff'] = 51e6      # 51 MHz
    data['freq_bw'] = 2e6        # 2 MHz
    data['freq_bw_max'] = 0.0    # no band association limiting
    associated_image = Image(dataset=dataset, data=data)
    self.assertEqual(get_band_for_image(first_image),
                     get_band_for_image(associated_image))

    # this image should *not* be assigned the same band, since bandwidth is
    # limited
    data['freq_eff'] = 47e6      # 47 MHz
    data['freq_bw'] = 5e6        # 5 MHz
    data['freq_bw_max'] = 0.5e5  # limit band association bandwidth to 50 kHz
    associated_image = Image(dataset=dataset, data=data)
    self.assertNotEqual(get_band_for_image(first_image),
                        get_band_for_image(associated_image))
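# A hedged sketch of the rule test_max_bandwidth() appears to exercise:
# when 'freq_bw_max' is non-zero it caps the bandwidth used for band
# association. This is inferred from the assertions above, not taken from
# the schema code itself.
def association_bandwidth(freq_bw, freq_bw_max):
    """Bandwidth assumed to be used when matching an image to a band."""
    if freq_bw_max > 0.0:
        return min(freq_bw, freq_bw_max)
    return freq_bw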
def test_frequency_range(self):
    """
    Determine range of frequencies supported by DB schema.
    """
    def get_freq_for_image(image):
        # Returns the stored frequency corresponding to a particular image.
        return tkp.db.execute(
            """
            SELECT freq_central
            FROM image, frequencyband
            WHERE image.id = %(id)s
              AND image.band = frequencyband.id
            """, {"id": image.id}).fetchone()[0]

    dataset = DataSet(data={'description': self._testMethodName},
                      database=self.database)
    data = copy(self.image_data)

    data['freq_eff'] = 1e6  # 1 MHz
    data['freq_bw'] = 1e3   # 1 kHz
    mhz_freq_image = Image(dataset=dataset, data=data)
    self.assertEqual(data['freq_eff'], get_freq_for_image(mhz_freq_image))

    data['freq_eff'] = 100e9  # 100 GHz (e.g. CARMA)
    data['freq_bw'] = 5e9     # 5 GHz
    ghz_freq_image = Image(dataset=dataset, data=data)
    self.assertEqual(data['freq_eff'], get_freq_for_image(ghz_freq_image))

    data['freq_eff'] = 5e15  # 5 PHz (e.g. UV obs)
    data['freq_bw'] = 1e14   # 100 THz
    phz_freq_image = Image(dataset=dataset, data=data)
    self.assertEqual(data['freq_eff'], get_freq_for_image(phz_freq_image))
def create_dataset(dataset_id, description):
    """
    Creates a dataset if it doesn't exist.

    Note: Should only be used in a master recipe

    Returns:
        the database ID of this dataset
    """
    database = Database()
    if dataset_id == -1:
        dataset = DataSet({'description': description}, database)
        logger.info("created dataset %s (%s)" % (dataset.id,
                                                 dataset.description))
    else:
        dataset = DataSet(id=dataset_id, database=database)
        logger.info("using dataset %s (%s)" % (dataset.id,
                                               dataset.description))
    return dataset.id
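# Usage sketch: -1 requests a fresh dataset (the auto-increment convention
# noted in the docstring); an existing id re-uses that dataset, in which
# case the description argument goes unused.
def _example_create_dataset():
    new_id = create_dataset(-1, "example observation run")  # hypothetical description
    same_id = create_dataset(new_id, "ignored")             # re-use path
    assert new_id == same_id
    return new_id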
def setUp(self):
    self.database = tkp.db.database.Database()
    self.dataset = DataSet(
        data={'description': "Src. assoc:" + self._testMethodName},
        database=self.database)
    self.im_params = db_subs.generate_timespaced_dbimages_data(n_images=8)
    self.db_imgs = []
def test_update(self):
    """
    Check that ORM-updates work for the Image class.
    """
    dataset1 = DataSet(
        data={'description': 'dataset with changing images'},
        database=self.database)

    image_data = db_subs.example_dbimage_data_dict()
    image1 = Image(dataset=dataset1, data=image_data)

    # Now, update (without changing anything) and make sure it remains the
    # same (sanity check):
    image1.update()
    self.assertEqual(image1.tau_time, image_data['tau_time'])

    # This time, change something:
    tau_time1 = image_data['tau_time'] + 100
    tau_time2 = tau_time1 + 300
    tau_time3 = tau_time2 + 300
    tau_time4 = tau_time3 + 300
    freq_eff1 = image_data['freq_eff'] * 1.2

    image1.update(tau_time=tau_time1)
    self.assertEqual(image1.tau_time, tau_time1)

    # New 'image' orm-object, created from the database id of image1.
    image2 = Image(dataset=dataset1, id=image1.id)
    self.assertAlmostEqual(image2.tau_time, tau_time1)
    self.assertAlmostEqual(image2.freq_eff, image_data['freq_eff'])

    # Same id, so changing image2 changes image1,
    # but *only* after calling update()
    image2.update(tau_time=tau_time2)
    self.assertAlmostEqual(image1.tau_time, tau_time1)
    image1.update()
    self.assertAlmostEqual(image1.tau_time, tau_time2)

    image1.update(tau_time=tau_time3)
    image2.update()
    self.assertAlmostEqual(image2.tau_time, tau_time3)

    # Test with multiple items:
    image1.update(tau_time=tau_time4, freq_eff=freq_eff1)
    self.assertAlmostEqual(image1.tau_time, tau_time4)
    self.assertAlmostEqual(image1.freq_eff, freq_eff1)

    # And that datetime conversion roundtrips work correctly:
    dtime0 = image_data['taustart_ts']
    dtime1 = dtime0 + datetime.timedelta(days=3)
    self.assertEqual(image1.taustart_ts, dtime0)
    self.assertEqual(image2.taustart_ts, dtime0)
    image2.update(taustart_ts=dtime1)
    # image1 orm-object not yet updated, still caches old value:
    self.assertEqual(image1.taustart_ts, dtime0)
    self.assertEqual(image2.taustart_ts, dtime1)
    image1.update()
    self.assertEqual(image1.taustart_ts, dtime1)
def test_create2(self):
    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    self.assertEqual(dataset1.images, set())
    image_data = db_subs.example_dbimage_data_dict()
    image1 = Image(dataset=dataset1, data=image_data)
    image2 = Image(dataset=dataset1, data=image_data)

    extractedsource_data = {'ra': 123.123, 'decl': 23.23,
                            'ra_err': 21.1, 'decl_err': 21.09,
                            'ra_fit_err': 0.1, 'decl_fit_err': 0.1,
                            'uncertainty_ew': 0.1, 'uncertainty_ns': 0.1,
                            'zone': 1,
                            'x': 0.11, 'y': 0.22, 'z': 0.33,
                            'racosdecl': 0.44,
                            'det_sigma': 10.0,
                            'ew_sys_err': 20, 'ns_sys_err': 20,
                            'error_radius': 10.0}
    source1 = ExtractedSource(image=image1, data=extractedsource_data)
    extractedsource_data['ra'] = 45.45
    extractedsource_data['decl'] = 55.55
    source2 = ExtractedSource(image=image1, data=extractedsource_data)
    self.assertEqual(len(image1.sources), 2)

    # Source #3 points to the same source as #2
    source3 = ExtractedSource(id=source2.id, database=self.database)
    # Which means there are no extra sources for image1
    self.assertEqual(len(image1.sources), 2)
    srcids = set([source.id for source in image1.sources])

    # If, however, we create a new source with
    # an image reference in the constructor, we get a
    # 'deep' copy:
    source4 = ExtractedSource(image=image1, id=source2.id)
    # Now there's a new source for image1!
    self.assertEqual(len(image1.sources), 3)
    # But if we filter on the source ids,
    # we see there are really only two sources
    srcids = set([source.id for source in image1.sources])
    self.assertEqual(set([source1.id, source2.id]), srcids)

    extractedsource_data['ra'] = 89.89
    extractedsource_data['decl'] = 78.78
    source5 = ExtractedSource(image=image2, data=extractedsource_data)
    self.assertEqual(len(image2.sources), 1)
    self.assertEqual(image2.sources.pop().id, source5.id)
def test_create(self):
    """Create a new dataset, and retrieve it"""
    dataset1 = DataSet(data={'description': 'dataset 1'})
    # The description of the following dataset will be ignored, and set
    # to that of the existing dataset with the given id
    dataset2 = DataSet(id=dataset1.id)
    dataset2.update()
    self.assertEqual(dataset2.description, "dataset 1")
    self.assertEqual(dataset2.id, dataset1.id)
    # Update a couple of fields:
    #dataset2.update(dsoutname='output.ms',
    #                description='testing of dataset',
    #                process_ts=datetime.datetime(1970, 1, 1))
    dataset2.update(type=2, process_ts=datetime.datetime(1970, 1, 1))
    self.assertEqual(dataset2.description, "dataset 1")
    self.assertEqual(dataset2.id, dataset1.id)
    # 'data' is ignored if an id is given:
    dataset3 = DataSet(data={'description': 'dataset 3'}, id=dataset1.id)
    self.assertEqual(dataset3.description, "dataset 1")
    self.assertEqual(dataset3.id, dataset1.id)
def test_set_process_timestamps(self):
    dataset = DataSet(data={'description': 'test dataset'})
    time.sleep(3)
    update_dataset_process_end_ts(dataset.id)
    start_time, end_time = db_query(
        """
        SELECT process_start_ts, process_end_ts
        FROM dataset
        WHERE id = %(id)s
        """, {"id": dataset.id}).fetchone()
    self.assertLess(start_time, end_time)
def test_frequency_difference(self):
    # Check that images which are at almost the same frequency fall nicely
    # into the same band (unless they fall over a band boundary). See #4801.
    # Bands are 1 MHz wide and centred on the MHz.
    def get_band_for_image(image):
        # Returns the band number corresponding to a particular image.
        return tkp.db.execute(
            """
            SELECT band
            FROM image
            WHERE image.id = %(id)s
            """, {"id": image.id}).fetchone()[0]

    data = copy(self.image_data)
    dataset1 = DataSet(data={'description': self._testMethodName},
                       database=self.database)

    # Example frequencies/bandwidths supplied in bug report.
    data['freq_eff'] = 124021911.62109375
    data['freq_bw'] = 1757812.5

    # image1 and image2 are exactly the same, so should be in the same
    # band.
    image1 = Image(dataset=dataset1, data=data)
    image2 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image2))

    # Another image at a frequency 1 MHz different should be in
    # a different band...
    data['freq_eff'] = data['freq_eff'] - 1e6
    image3 = Image(dataset=dataset1, data=data)
    self.assertNotEqual(get_band_for_image(image1),
                        get_band_for_image(image3))

    # ...even if it has a huge bandwidth.
    data['freq_bw'] *= 100
    image4 = Image(dataset=dataset1, data=data)
    self.assertNotEqual(get_band_for_image(image1),
                        get_band_for_image(image4))

    # Finally, this image should be in the same band, since it's at an only
    # slightly different frequency.
    data['freq_eff'] = 123924255.37109375
    data['freq_bw'] = 1953125.0
    image5 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image5))
def test_update(self):
    image_data = db_subs.example_dbimage_data_dict()
    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    self.assertEqual(dataset1.images, set())
    image1 = Image(dataset=dataset1, data=image_data)
    image2 = Image(dataset=dataset1, data=image_data)

    extractedsource_data = {'ra': 123.123, 'decl': 23.23,
                            'ra_err': 21.1, 'decl_err': 21.09,
                            'ra_fit_err': 0.1, 'decl_fit_err': 0.1,
                            'uncertainty_ew': 0.1, 'uncertainty_ns': 0.1,
                            'zone': 1,
                            'x': 0.11, 'y': 0.22, 'z': 0.33,
                            'racosdecl': 0.44,
                            'det_sigma': 11.1,
                            'ew_sys_err': 20, 'ns_sys_err': 20,
                            'error_radius': 10.0}
    source1 = ExtractedSource(image=image1, data=extractedsource_data)
    extractedsource_data['ra'] = 45.45
    extractedsource_data['decl'] = 55.55
    source2 = ExtractedSource(image=image1, data=extractedsource_data)
    self.assertEqual(len(image1.sources), 2)

    # Source #3 points to the same source as #2
    source3 = ExtractedSource(id=source2.id, database=self.database)
    # Update source3
    source3.update(decl=44.44)
    # But no change for #1 and #2
    self.assertAlmostEqual(source1.decl, 23.23)
    self.assertAlmostEqual(source2.decl, 55.55)
    self.assertAlmostEqual(source3.decl, 44.44)

    source1.update()
    # nothing changes for #1
    self.assertAlmostEqual(source1.decl, 23.23)
    self.assertAlmostEqual(source2.decl, 55.55)
    self.assertAlmostEqual(source3.decl, 44.44)

    # Now we make sure source #2 takes note of the change done through #3
    source2.update()
    self.assertAlmostEqual(source1.decl, 23.23)
    self.assertAlmostEqual(source2.decl, 44.44)
def test_create(self):
    dataset = DataSet(data={'description': 'dataset with images'},
                      database=self.database)
    image = Image(dataset=dataset,
                  data=db_subs.example_dbimage_data_dict())

    extracted_source_data = dict(zone=13,
                                 ra=12.12, decl=13.13,
                                 ra_err=21.1, decl_err=21.09,
                                 ra_fit_err=1.12, decl_fit_err=1.23,
                                 uncertainty_ew=0.1, uncertainty_ns=0.1,
                                 ew_sys_err=20, ns_sys_err=20,
                                 error_radius=10.0,
                                 x=0.11, y=0.22, z=0.33,
                                 racosdecl=0.44,
                                 det_sigma=10.)

    src1 = ExtractedSource(data=extracted_source_data, image=image)
    src2 = ExtractedSource(data=extracted_source_data, image=image,
                           database=self.database)
    self.assertNotEqual(src1.id, src2.id)

    extracted_source_data['image'] = image.id
    src3 = ExtractedSource(data=extracted_source_data,
                           database=self.database)

    extracted_source_data['ra'] = 23.23
    src4 = ExtractedSource(data=extracted_source_data,
                           database=self.database, id=src1.id)
    self.assertEqual(src1.id, src4.id)
    self.assertAlmostEqual(src1.ra, src4.ra)

    del extracted_source_data['x']
    self.assertRaisesRegexp(AttributeError, "missing required data key: x",
                            ExtractedSource,
                            data=extracted_source_data,
                            database=self.database)
def test_update(self):
    """Update all or individual dataset columns"""
    dataset1 = DataSet(data={'description': 'dataset 1'})
    self.assertEqual(dataset1.description, "dataset 1")
    dataset1.update(rerun=5, description="new dataset")
    self.database.cursor.execute(
        "SELECT rerun, description FROM dataset WHERE id=%s",
        (dataset1.id,))
    results = self.database.cursor.fetchone()
    self.assertEqual(results[0], 5)
    self.assertEqual(results[1], "new dataset")
    self.assertEqual(dataset1.description, "new dataset")
    self.assertEqual(dataset1.rerun, 5)
    dataset1.update(process_end_ts=datetime.datetime(1970, 1, 1))
    self.assertEqual(dataset1.process_end_ts, datetime.datetime(1970, 1, 1))
def test_frequency_range(self):
    """
    Determine range of frequencies supported by DB schema.
    """
    dataset = DataSet(data={'description': self._testMethodName},
                      database=self.database)
    data = copy(self.image_data)

    data['freq_eff'] = 1e6  # 1 MHz
    data['freq_bw'] = 1e3   # 1 kHz
    mhz_freq_image = Image(dataset=dataset, data=data)
    self.assertEqual(data['freq_eff'], get_freq_for_image(mhz_freq_image))

    data['freq_eff'] = 100e9  # 100 GHz (e.g. CARMA)
    data['freq_bw'] = 5e9     # 5 GHz
    ghz_freq_image = Image(dataset=dataset, data=data)
    self.assertEqual(data['freq_eff'], get_freq_for_image(ghz_freq_image))

    data['freq_eff'] = 5e15  # 5 PHz (e.g. UV obs)
    data['freq_bw'] = 1e14   # 100 THz
    phz_freq_image = Image(dataset=dataset, data=data)
    self.assertEqual(data['freq_eff'], get_freq_for_image(phz_freq_image))
def test_infinite(self):
    # Check that database insertion doesn't choke on infinite errors.
    dataset = DataSet(data={'description': 'example dataset'},
                      database=self.database)
    image = Image(dataset=dataset,
                  data=db_subs.example_dbimage_data_dict())

    # Inserting a standard example extractedsource should be fine
    extracted_source = db_subs.example_extractedsource_tuple()
    image.insert_extracted_sources([extracted_source])
    inserted = columns_from_table('extractedsource',
                                  where={'image': image.id})
    self.assertEqual(len(inserted), 1)

    # But if the source has infinite errors we drop it and log a warning
    extracted_source = db_subs.example_extractedsource_tuple(
        error_radius=float('inf'),
        peak_err=float('inf'),
        flux_err=float('inf'))

    # We will add a handler to the root logger which catches all log
    # output in a buffer.
    iostream = BytesIO()
    hdlr = logging.StreamHandler(iostream)
    logging.getLogger().addHandler(hdlr)

    image.insert_extracted_sources([extracted_source])

    logging.getLogger().removeHandler(hdlr)
    # We want to be sure that the error has been appropriately logged.
    self.assertIn("Dropped source fit with infinite flux errors",
                  iostream.getvalue())
    inserted = columns_from_table('extractedsource',
                                  where={'image': image.id})
    self.assertEqual(len(inserted), 1)
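# The log-capture pattern used above, restated as a standalone helper
# sketch (Python 3 spelling with io.StringIO; the test above predates this
# and writes to a BytesIO buffer): attach a StreamHandler writing to an
# in-memory buffer, run the code under test, then inspect the captured text.
def capture_logs(func, *args, **kwargs):
    """Run func while capturing root-logger output; return (result, text)."""
    import logging
    from io import StringIO
    buf = StringIO()
    handler = logging.StreamHandler(buf)
    root_logger = logging.getLogger()
    root_logger.addHandler(handler)
    try:
        result = func(*args, **kwargs)
    finally:
        root_logger.removeHandler(handler)
    return result, buf.getvalue()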
def test_frequency_difference(self):
    # Check that images which are at almost the same frequency fall nicely
    # into the same band (unless they fall over a band boundary). See #4801.
    # Bands are 1 MHz wide and centred on the MHz.
    data = copy(self.image_data)
    dataset1 = DataSet(data={'description': self._testMethodName},
                       database=self.database)

    # Example frequencies/bandwidths supplied in bug report.
    data['freq_eff'] = 124021911.62109375
    data['freq_bw'] = 1757812.5

    # image1 and image2 are exactly the same, so should be in the same
    # band.
    image1 = Image(dataset=dataset1, data=data)
    image2 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image2))

    # Another image at a frequency 10 MHz different should be in
    # a different band...
    data['freq_eff'] -= 1e7
    image3 = Image(dataset=dataset1, data=data)
    self.assertNotEqual(get_band_for_image(image1),
                        get_band_for_image(image3))

    # ...but in the same band if the bandwidths overlap.
    data['freq_bw'] *= 100
    image4 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image4))

    # Finally, this image should be in the same band, since it's at an only
    # slightly different frequency.
    data['freq_eff'] = 123924255.37109375
    data['freq_bw'] = 1953125.0
    image5 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image5))
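# A pure-Python sketch of the band-overlap rule this variant of
# test_frequency_difference relies on, under the assumption that two images
# share a band when their ranges [freq_eff - bw/2, freq_eff + bw/2] overlap.
# (Note the earlier variant asserts a band boundary wins even with a huge
# bandwidth; the authoritative logic lives in the database schema, not here.)
def bands_overlap(freq_eff1, bw1, freq_eff2, bw2):
    """Return True if the two frequency ranges overlap."""
    lo1, hi1 = freq_eff1 - bw1 / 2., freq_eff1 + bw1 / 2.
    lo2, hi2 = freq_eff2 - bw2 / 2., freq_eff2 + bw2 / 2.
    return lo1 <= hi2 and lo2 <= hi1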
def test_insert(self):
    dataset1 = DataSet(data=self.description)
    monitor_positions = [(5., 5.), (123., 85.)]
    dbgen.insert_monitor_positions(dataset1.id, monitor_positions)
class TestLightSurface(unittest.TestCase):
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = DataSet(data={'description': 'dataset with images'})

    def tearDown(self):
        tkp.db.rollback()

    @requires_database()
    def test_lightsurface(self):
        images = []
        # make 4 * 5 images with different frequencies and dates
        for frequency in [80e6, 90e6, 100e6, 110e6, 120e6]:
            for day in [3, 4, 5, 6]:
                img_data = db_subs.example_dbimage_data_dict(
                    taustart_ts=datetime.datetime(2010, 3, day),
                    freq_eff=frequency)
                image = Image(dataset=self.dataset, data=img_data)
                images.append(image)

        # 3 sources per image, with different coordinates & flux
        data_list = []
        for i in range(1, 4):
            data_list.append({'ra': 111.111 + i,
                              'decl': 11.11 + i,
                              'ra_fit_err': 0.01,
                              'decl_fit_err': 0.01,
                              'i_peak': 10. * i,
                              'i_peak_err': 0.1,
                              'error_radius': 10.0,
                              'fit_type': 1,
                              # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
                              })

        # Insert the 3 sources in each image, while further varying the flux
        for i, image in enumerate(images):
            # Create the "source finding results"
            sources = []
            for data in data_list:
                source = db_subs.example_extractedsource_tuple(
                    ra=data['ra'], dec=data['decl'],
                    ra_fit_err=data['ra_fit_err'],
                    dec_fit_err=data['decl_fit_err'],
                    peak=data['i_peak'] * (1 + i),
                    peak_err=data['i_peak_err'],
                    flux=data['i_peak'] * (1 + i),
                    flux_err=data['i_peak_err'],
                    fit_type=data['fit_type'])
                sources.append(source)
            # Insert the sources
            insert_extracted_sources(image._id, sources)
            # Run the association for each list of sources for an image
            associate_extracted_sources(image._id, deRuiter_r=3.7,
                                        new_source_sigma_margin=3)

        # updates the dataset and its set of images
        self.dataset.update()
        self.dataset.update_images()
        # update the images and their sets of sources
        for image in self.dataset.images:
            image.update()
            image.update_sources()

        # Now pick any image, select the first source (smallest RA)
        # and extract its light curve
        # TODO: aaarch this is so ugly. Because this is a set we need to pop it.
        sources = self.dataset.images.pop().sources
        #sources = self.dataset.images[-1].sources
        sources = sorted(sources, key=attrgetter('ra'))
        extracted_source = sources[0].id
        lightcurve = tkp.db.general.lightcurve(extracted_source)
class TestLightCurve(unittest.TestCase):
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = DataSet(data={'description': 'dataset with images'},
                               database=self.database)

    def tearDown(self):
        tkp.db.rollback()

    @requires_database()
    def test_lightcurve(self):
        # make 4 images with different dates
        images = []
        for day in [3, 4, 5, 6]:
            data = {'taustart_ts': datetime.datetime(2010, 3, day),
                    'tau_time': 3600,
                    'url': '/',
                    'freq_eff': 80e6,
                    'freq_bw': 1e6,
                    'beam_smaj_pix': float(2.7),
                    'beam_smin_pix': float(2.3),
                    'beam_pa_rad': float(1.7),
                    'deltax': float(-0.01111),
                    'deltay': float(0.01111),
                    'centre_ra': 111,
                    'centre_decl': 11,
                    'xtr_radius': 3}
            image = Image(dataset=self.dataset, data=data)
            images.append(image)

        # 3 sources per image, with different coordinates & flux
        data_list = []
        for i in range(1, 4):
            data_list.append({'ra': 111.11 + i,
                              'decl': 11.11 + i,
                              'ra_fit_err': 0.01,
                              'decl_fit_err': 0.01,
                              'ra_sys_err': 20,
                              'decl_sys_err': 20,
                              'i_peak': 10 * i,
                              'i_peak_err': 0.1,
                              # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
                              })

        # Insert the 3 sources in each image, while further varying the flux
        for i, image in enumerate(images):
            # Create the "source finding results"
            # Note that we reuse 'i_peak' as both peak & integrated flux.
            sources = []
            for data in data_list:
                source = (data['ra'], data['decl'],
                          data['ra_fit_err'], data['decl_fit_err'],  # Gaussian fit errors
                          data['i_peak'] * (1 + i), data['i_peak_err'],  # Peak
                          data['i_peak'] * (1 + i), data['i_peak_err'],  # Integrated
                          10.,  # Significance level
                          1, 1, 0,  # Beam params (width arcsec major, width arcsec minor, parallactic angle)
                          data['ra_sys_err'], data['decl_sys_err'])  # Systematic errors
                sources.append(source)
            # Insert the sources
            image.insert_extracted_sources(sources)
            # Run the association for each list of sources for an image
            image.associate_extracted_sources(deRuiter_r=3.7)

        # updates the dataset and its set of images
        self.dataset.update()
        self.dataset.update_images()
        # update the images and their sets of sources
        for image in self.dataset.images:
            image.update()
            image.update_sources()

        # Now pick any image, select the first source (smallest RA)
        # and extract its light curve
        sources = self.dataset.images.pop().sources
        sources = sorted(sources, key=attrgetter('ra'))
        lightcurve = sources[0].lightcurve()

        # check that the sources are associated in all images
        self.assertEqual(len(images), len(lightcurve))
        self.assertEqual(lightcurve[0][0], datetime.datetime(2010, 3, 3, 0, 0))
        self.assertEqual(lightcurve[1][0], datetime.datetime(2010, 3, 4, 0, 0))
        self.assertEqual(lightcurve[2][0], datetime.datetime(2010, 3, 5, 0, 0))
        self.assertEqual(lightcurve[3][0], datetime.datetime(2010, 3, 6, 0, 0))
        self.assertAlmostEqual(lightcurve[0][2], 10.)
        self.assertAlmostEqual(lightcurve[1][2], 20.)
        self.assertAlmostEqual(lightcurve[2][2], 30.)
        self.assertAlmostEqual(lightcurve[3][2], 40.)

        # Since the light curves are very similar, only eta_int differs
        results = dbtransients._select_updated_variability_indices(
            self.dataset.images[-1].id)
        results = sorted(results, key=itemgetter('eta_int'))
        for result, eta_int in zip(results, (16666.66666667,
                                             66666.666666667,
                                             150000.0)):
            self.assertEqual(result['f_datapoints'], 4)
            self.assertAlmostEqual(result['eta_int'], eta_int)
            self.assertAlmostEqual(result['v_int'], 0.516397779494)
def test_m2m_nullDetection(self):
    """
    This tests that two sources (close enough to be associated if they
    were detected at different timesteps) which are not seen in the next
    image, and thus have forced fits, will have separate light curves.
    The positions are from the previous test.
    """
    data = {'description': "null detection:" + self._testMethodName}
    dataset = DataSet(data=data)

    # Three timesteps, just 1 band -> 3 images.
    taustart_tss = [datetime.datetime(2013, 8, 1),
                    datetime.datetime(2013, 9, 1),
                    datetime.datetime(2013, 10, 1)]
    freq_effs = [124]
    freq_effs = [f * 1e6 for f in freq_effs]

    im_params = db_subs.generate_timespaced_dbimages_data(
        len(freq_effs) * len(taustart_tss))
    timestamps = itertools.repeat(taustart_tss, len(freq_effs))

    for im, freq, ts in zip(im_params,
                            itertools.cycle(freq_effs),
                            itertools.chain.from_iterable(zip(*timestamps))):
        im['freq_eff'] = freq
        im['taustart_ts'] = ts

    images = []
    for im in im_params:
        image = tkp.db.Image(dataset=dataset, data=im)
        images.append(image)

    # Arbitrary parameters, except that they fall inside our image
    # and close together (see previous test)
    src0 = db_subs.example_extractedsource_tuple(ra=122.985, dec=10.5)
    src1 = db_subs.example_extractedsource_tuple(ra=123.015, dec=10.5)

    # Group images in blocks of 4, corresponding to all frequency bands at
    # a given timestep.
    for images in zip(*(iter(images),) * len(freq_effs)):
        for image in images:
            # The sources are only seen at timestep 0
            if (image.taustart_ts == taustart_tss[0]):
                dbgen.insert_extracted_sources(image.id, [src0, src1],
                                               'blind')
            else:
                pass

        for image in images:
            dbass.associate_extracted_sources(image.id, deRuiter_r=5.68,
                                              new_source_sigma_margin=3)
            nd_ids_pos = dbnd.get_nulldetections(image.id)
            # The null_detections are the positional inputs for the forced
            # fits, which in their turn return additional parameters,
            # e.g. from src0, src1
            if image.taustart_ts == taustart_tss[0]:
                # There are no null detections at the first timestep
                self.assertEqual(len(nd_ids_pos), 0)
            elif image.taustart_ts == taustart_tss[1]:
                # src0 & src1 are null detections at the second timestep
                self.assertEqual(len(nd_ids_pos), 2)
                dbgen.insert_extracted_sources(
                    image.id, [src0, src1], 'ff_nd',
                    ff_runcat_ids=[ids for ids, ra, decl in nd_ids_pos])
            else:
                # All other images have two null detections.
                self.assertEqual(len(nd_ids_pos), 2)
                dbgen.insert_extracted_sources(
                    image.id, [src0, src1], 'ff_nd',
                    ff_runcat_ids=[ids for ids, ra, decl in nd_ids_pos])
            # And here we have to associate the null detections with the
            # runcat sources...
            dbnd.associate_nd(image.id)

    query = """\
    SELECT id
          ,datapoints
      FROM runningcatalog r
     WHERE dataset = %(dataset_id)s
    ORDER BY datapoints
    """
    cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
    result = cursor.fetchall()
    # We should have two runningcatalog sources, with a datapoint for
    # every image in which the sources were seen.
    self.assertEqual(len(result), 2)

    query = """\
    SELECT r.id
          ,rf.band
          ,rf.f_datapoints
      FROM runningcatalog r
          ,runningcatalog_flux rf
     WHERE r.dataset = %(dataset_id)s
       AND rf.runcat = r.id
    ORDER BY r.id
            ,rf.band
    """
    cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
    result = cursor.fetchall()
    # We should have two runningcatalog_flux entries,
    # one for every source in the band, i.e. 2 x 1.
    self.assertEqual(len(result), 2)

    # Source 0: inserted at timestep 0; force-fits in the images at the
    # next two timesteps, so 1+2 datapoints for band 0.
    self.assertEqual(result[0][2], 3)
    # Source 1: likewise, 1+2 datapoints for band 0.
    self.assertEqual(result[1][2], 3)
    #self.assertEqual(result[2][2], 2)
    #self.assertEqual(result[3][2], 2)

    # We should also have two lightcurves, one for each source,
    # each with 3 datapoints in band0 (t1,t2,t3).
    query = """\
    SELECT a.runcat
          ,a.xtrsrc
          ,a.type
          ,i.band
          ,i.taustart_ts
      FROM assocxtrsource a
          ,extractedsource x
          ,image i
     WHERE a.xtrsrc = x.id
       AND x.image = i.id
       AND i.dataset = %(dataset_id)s
    ORDER BY a.runcat
            ,i.band
            ,i.taustart_ts
    """
    cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
    result = cursor.fetchall()
    # 3 + 3 entries for source 0 and 1 respectively.
    self.assertEqual(len(result), 6)

    # The individual light-curve datapoints.
    # Source 0: new at t1, band0
    self.assertEqual(result[0][2], 4)
    self.assertEqual(result[0][4], taustart_tss[0])
    # Source 0: forced fit at t2, same band
    self.assertEqual(result[1][2], 7)
    self.assertEqual(result[1][3], result[0][3])
    self.assertEqual(result[1][4], taustart_tss[1])
    # Source 0: forced fit at t3, same band
    self.assertEqual(result[2][2], 7)
    self.assertEqual(result[2][3], result[1][3])
    self.assertEqual(result[2][4], taustart_tss[2])
    # Source 1: new at t1, band0
    self.assertEqual(result[3][2], 4)
    self.assertEqual(result[3][3], result[1][3])
    self.assertEqual(result[3][4], taustart_tss[0])
    # Source 1: forced fit at t2, band0
    self.assertEqual(result[4][2], 7)
    self.assertEqual(result[4][3], result[3][3])
    self.assertEqual(result[4][4], taustart_tss[1])
    # Source 1: forced fit at t3, band0
    self.assertEqual(result[5][2], 7)
    self.assertEqual(result[5][3], result[4][3])
    self.assertEqual(result[5][4], taustart_tss[2])
def test_1to1_nullDetection(self):
    """
    This tests that the two sources are associated if they were detected
    at different timesteps. The positions are used in the next test as
    well.
    """
    data = {'description': "null detection:" + self._testMethodName}
    dataset = DataSet(data=data)

    # Two timesteps, just 1 band -> 2 images.
    taustart_tss = [datetime.datetime(2013, 8, 1),
                    datetime.datetime(2013, 9, 1)]
    freq_effs = [124]
    freq_effs = [f * 1e6 for f in freq_effs]

    im_params = db_subs.generate_timespaced_dbimages_data(
        len(freq_effs) * len(taustart_tss))
    timestamps = itertools.repeat(taustart_tss, len(freq_effs))

    for im, freq, ts in zip(im_params,
                            itertools.cycle(freq_effs),
                            itertools.chain.from_iterable(zip(*timestamps))):
        im['freq_eff'] = freq
        im['taustart_ts'] = ts

    images = []
    for im in im_params:
        image = tkp.db.Image(dataset=dataset, data=im)
        images.append(image)

    # Arbitrary parameters, except that they fall inside our image
    # and close together (see next test)
    src0 = db_subs.example_extractedsource_tuple(ra=122.985, dec=10.5)
    src1 = db_subs.example_extractedsource_tuple(ra=123.015, dec=10.5)

    # Group images in blocks of 4, corresponding to all frequency bands at
    # a given timestep.
    for images in zip(*(iter(images),) * len(freq_effs)):
        for image in images:
            # One source is seen at each timestep:
            if (image.taustart_ts == taustart_tss[0]):
                dbgen.insert_extracted_sources(image.id, [src0], 'blind')
            elif (image.taustart_ts == taustart_tss[1]):
                dbgen.insert_extracted_sources(image.id, [src1], 'blind')
            else:
                pass

        for image in images:
            dbass.associate_extracted_sources(image.id, deRuiter_r=5.68,
                                              new_source_sigma_margin=3)

    query = """\
    SELECT id
          ,datapoints
      FROM runningcatalog r
     WHERE dataset = %(dataset_id)s
    ORDER BY datapoints
    """
    cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
    result = cursor.fetchall()
    # We should have one runningcatalog source, with two datapoints
    # for the images in which the sources were seen.
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0][1], 2)

    query = """\
    SELECT r.id
          ,rf.band
          ,rf.f_datapoints
      FROM runningcatalog r
          ,runningcatalog_flux rf
     WHERE r.dataset = %(dataset_id)s
       AND rf.runcat = r.id
    ORDER BY r.id
            ,rf.band
    """
    cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
    result = cursor.fetchall()
    # We should have one runningcatalog_flux entry,
    # where the source has two flux datapoints
    self.assertEqual(len(result), 1)
    self.assertEqual(result[0][2], 2)
class TestLightCurve(unittest.TestCase):
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = DataSet(data={'description': 'dataset with images'},
                               database=self.database)

    def tearDown(self):
        tkp.db.rollback()

    @requires_database()
    def test_lightcurve(self):
        # make 4 images with different dates
        images = []
        image_datasets = db_subs.generate_timespaced_dbimages_data(
            n_images=4,
            taustart_ts=datetime.datetime(2010, 3, 3))
        for dset in image_datasets:
            image = Image(dataset=self.dataset, data=dset)
            images.append(image)

        # 3 sources per image, with different coordinates & flux
        data_list = []
        for i in range(1, 4):
            data_list.append({'ra': 111.11 + i,
                              'decl': 11.11 + i,
                              'i_peak': 10. * i,
                              'i_peak_err': 0.1,
                              })

        # Insert the 3 sources in each image, while further varying the flux
        lightcurves_sorted_by_ra = [[], [], []]
        for im_idx, image in enumerate(images):
            # Create the "source finding results"
            # Note that we reuse 'i_peak' as both peak & integrated flux.
            img_sources = []
            for src_idx, data in enumerate(data_list):
                src = db_subs.example_extractedsource_tuple(
                    ra=data['ra'], dec=data['decl'],
                    peak=data['i_peak'] * (1 + im_idx),
                    flux=data['i_peak'] * (1 + im_idx))
                lightcurves_sorted_by_ra[src_idx].append(src)
                img_sources.append(src)
            insert_extracted_sources(image._id, img_sources)
            associate_extracted_sources(image._id, deRuiter_r=3.7,
                                        new_source_sigma_margin=3)

        # updates the dataset and its set of images
        self.dataset.update()
        self.dataset.update_images()
        # update the images and their sets of sources
        for image in self.dataset.images:
            image.update()
            image.update_sources()

        # Now pick the last image, select the first source (smallest RA)
        # and extract its light curve
        sources = self.dataset.images[-1].sources
        sources = sorted(sources, key=attrgetter('ra'))
        lightcurve = ligtcurve_func(sources[0]._id)

        # check that the sources are associated in all images
        self.assertEqual(len(images), len(lightcurve))
        self.assertEqual(lightcurve[0][0], datetime.datetime(2010, 3, 3, 0, 0))
        self.assertEqual(lightcurve[1][0], datetime.datetime(2010, 3, 4, 0, 0))
        self.assertEqual(lightcurve[2][0], datetime.datetime(2010, 3, 5, 0, 0))
        self.assertEqual(lightcurve[3][0], datetime.datetime(2010, 3, 6, 0, 0))
        self.assertAlmostEqual(lightcurve[0][2], 10.)
        self.assertAlmostEqual(lightcurve[1][2], 20.)
        self.assertAlmostEqual(lightcurve[2][2], 30.)
        self.assertAlmostEqual(lightcurve[3][2], 40.)

        # Check the summary statistics (avg flux, etc)
        query = """\
        SELECT rf.avg_f_int
              ,rf.avg_f_int_sq
              ,avg_weighted_f_int
              ,avg_f_int_weight
          FROM runningcatalog r
              ,runningcatalog_flux rf
         WHERE r.dataset = %(dataset)s
           AND r.id = rf.runcat
        ORDER BY r.wm_ra
        """
        self.database.cursor.execute(query, {'dataset': self.dataset.id})
        runcat_flux_entries = get_db_rows_as_dicts(self.database.cursor)
        self.assertEqual(len(runcat_flux_entries),
                         len(lightcurves_sorted_by_ra))
        for idx, flux_summary in enumerate(runcat_flux_entries):
            py_results = db_subs.lightcurve_metrics(
                lightcurves_sorted_by_ra[idx])
            for key in flux_summary.keys():
                self.assertAlmostEqual(flux_summary[key],
                                       py_results[-1][key])

        # Now check the per-timestep statistics (variability indices)
        sorted_runcat_ids = columns_from_table(
            'runningcatalog',
            where={'dataset': self.dataset.id},
            order='wm_ra')
        sorted_runcat_ids = [entry['id'] for entry in sorted_runcat_ids]

        for idx, rcid in enumerate(sorted_runcat_ids):
            db_indices = db_queries.get_assoc_entries(self.database, rcid)
            py_indices = db_subs.lightcurve_metrics(
                lightcurves_sorted_by_ra[idx])
            self.assertEqual(len(db_indices), len(py_indices))
            for nstep in range(len(db_indices)):
                for key in ('v_int', 'eta_int', 'f_datapoints'):
                    self.assertAlmostEqual(db_indices[nstep][key],
                                           py_indices[nstep][key],
                                           places=5)
class TestLightSurface(unittest.TestCase):
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = DataSet(data={'description': 'dataset with images'})

    def tearDown(self):
        tkp.db.rollback()

    @requires_database()
    def test_lightsurface(self):
        images = []
        # make 4 * 5 images with different frequencies and dates
        for frequency in [80e6, 90e6, 100e6, 110e6, 120e6]:
            for day in [3, 4, 5, 6]:
                data = {'taustart_ts': datetime.datetime(2010, 3, day),
                        'tau_time': 3600,
                        'url': '/',
                        'freq_eff': frequency,
                        'freq_bw': 1e6,
                        'beam_smaj_pix': float(2.7),
                        'beam_smin_pix': float(2.3),
                        'beam_pa_rad': float(1.7),
                        'deltax': float(-0.01111),
                        'deltay': float(0.01111),
                        'centre_ra': 111,
                        'centre_decl': 11,
                        'xtr_radius': 3}
                image = Image(dataset=self.dataset, data=data)
                images.append(image)

        # 3 sources per image, with different coordinates & flux
        data_list = []
        for i in range(1, 4):
            data_list.append({'ra': 111.111 + i,
                              'decl': 11.11 + i,
                              'ra_fit_err': 0.01,
                              'decl_fit_err': 0.01,
                              'ew_sys_err': 20,
                              'ns_sys_err': 20,
                              'i_peak': 10 * i,
                              'i_peak_err': 0.1,
                              'error_radius': 10.0,
                              # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
                              })

        # Insert the 3 sources in each image, while further varying the flux
        for i, image in enumerate(images):
            # Create the "source finding results"
            sources = []
            for data in data_list:
                source = (data['ra'], data['decl'],
                          data['ra_fit_err'], data['decl_fit_err'],
                          data['i_peak'] * (1 + i), data['i_peak_err'],
                          data['i_peak'] * (1 + i), data['i_peak_err'],
                          10.,  # Significance level
                          1, 1, 0,  # Beam params (width arcsec major, width arcsec minor, parallactic angle)
                          data['ew_sys_err'], data['ns_sys_err'],  # Systematic errors
                          data['error_radius'])
                sources.append(source)
            # Insert the sources
            image.insert_extracted_sources(sources)
            # Run the association for each list of sources for an image
            image.associate_extracted_sources(deRuiter_r=3.7)

        # updates the dataset and its set of images
        self.dataset.update()
        self.dataset.update_images()
        # update the images and their sets of sources
        for image in self.dataset.images:
            image.update()
            image.update_sources()

        # Now pick any image, select the first source (smallest RA)
        # and extract its light curve
        # TODO: aaarch this is so ugly. Because this is a set we need to pop it.
        sources = self.dataset.images.pop().sources
        #sources = self.dataset.images[-1].sources
        sources = sorted(sources, key=attrgetter('ra'))
        extracted_source = sources[0].id
        lightcurve = tkp.db.general.lightcurve(extracted_source)
def test_nullDetection(self):
    data = {'description': "null detection:" + self._testMethodName}
    dataset = DataSet(data=data)

    # Three timesteps, each with 4 bands -> 12 images.
    taustart_tss = [datetime.datetime(2013, 8, 1),
                    datetime.datetime(2013, 9, 1),
                    datetime.datetime(2013, 10, 1)]
    freq_effs = [124, 149, 156, 185]
    freq_effs = [f * 1e6 for f in freq_effs]

    im_params = db_subs.generate_timespaced_dbimages_data(
        len(freq_effs) * len(taustart_tss))
    timestamps = itertools.repeat(taustart_tss, len(freq_effs))

    for im, freq, ts in zip(im_params,
                            itertools.cycle(freq_effs),
                            itertools.chain.from_iterable(zip(*timestamps))):
        im['freq_eff'] = freq
        im['taustart_ts'] = ts

    images = []
    for im in im_params:
        image = tkp.db.Image(dataset=dataset, data=im)
        images.append(image)

    # Arbitrary parameters, except that they fall inside our image.
    src0 = db_subs.example_extractedsource_tuple(ra=122.5, dec=9.5)
    src1 = db_subs.example_extractedsource_tuple(ra=123.5, dec=10.5)

    # Group images in blocks of 4, corresponding to all frequency bands at
    # a given timestep.
    for images in zip(*(iter(images),) * len(freq_effs)):
        for image in images:
            # The first source is only seen at timestep 0, band 0.
            # The second source is only seen at timestep 1, band 3.
            if (image.taustart_ts == taustart_tss[0] and
                    image.freq_eff == freq_effs[0]):
                dbgen.insert_extracted_sources(image.id, [src0], 'blind')
            elif (image.taustart_ts == taustart_tss[1] and
                    image.freq_eff == freq_effs[3]):
                dbgen.insert_extracted_sources(image.id, [src1], 'blind')
            else:
                pass

        for image in images:
            dbass.associate_extracted_sources(image.id, deRuiter_r=5.68,
                                              new_source_sigma_margin=3)
            nd_ids_pos = dbnd.get_nulldetections(image.id)
            # The null_detections are the positional inputs for the forced
            # fits, which in their turn return additional parameters,
            # e.g. from src0, src1
            if image.taustart_ts == taustart_tss[0]:
                # There are no null detections at the first timestep
                self.assertEqual(len(nd_ids_pos), 0)
            elif image.taustart_ts == taustart_tss[1]:
                # src0 is a null detection at the second timestep
                self.assertEqual(len(nd_ids_pos), 1)
                dbgen.insert_extracted_sources(
                    image.id, [src0], 'ff_nd',
                    ff_runcat_ids=[ids for ids, ra, decl in nd_ids_pos])
            else:
                # All other images have two null detections.
                self.assertEqual(len(nd_ids_pos), 2)
                dbgen.insert_extracted_sources(
                    image.id, [src0, src1], 'ff_nd',
                    ff_runcat_ids=[ids for ids, ra, decl in nd_ids_pos])
            # And here we have to associate the null detections with the
            # runcat sources...
            dbnd.associate_nd(image.id)

    query = """\
    SELECT id
          ,datapoints
      FROM runningcatalog r
     WHERE dataset = %(dataset_id)s
    ORDER BY datapoints
    """
    cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
    result = cursor.fetchall()
    # We should have two runningcatalog sources, with a datapoint for
    # every image in which the sources were seen.
    self.assertEqual(len(result), 2)

    query = """\
    SELECT r.id
          ,rf.band
          ,rf.f_datapoints
      FROM runningcatalog r
          ,runningcatalog_flux rf
     WHERE r.dataset = %(dataset_id)s
       AND rf.runcat = r.id
    ORDER BY r.id
            ,rf.band
    """
    cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
    result = cursor.fetchall()
    # We should have eight runningcatalog_flux entries,
    # one for every source in every band, i.e. 2 x 4.
    # The number of flux datapoints differs per source, though.
    self.assertEqual(len(result), 8)

    # Source 1: inserted into timestep 0, band 0.
    # Force-fits in band 0 images at next timesteps,
    # so 1+2 for band 0.
    self.assertEqual(result[0][2], 3)
    # Source 1: inserted into timestep 0, band 0.
    # Force-fits in bands 1,2,3 images at next timesteps,
    # so 0+2 for bands 1,2,3.
    self.assertEqual(result[1][2], 2)
    self.assertEqual(result[2][2], 2)
    self.assertEqual(result[3][2], 2)
    # Source 2: inserted into timestep 1, band 3.
    # Force-fits in band 0,1,2 images at next timestep,
    # so 1 for bands 0,1,2.
    self.assertEqual(result[4][2], 1)
    self.assertEqual(result[5][2], 1)
    self.assertEqual(result[6][2], 1)
    # Source 2: inserted into timestep 1, band 3.
    # Force-fit in the band 3 image at the next timestep,
    # so 1+1 for band 3.
    self.assertEqual(result[7][2], 2)

    # We should also have two lightcurves, one for each source,
    # where source 1 has 3 datapoints in band0 (t1,t2,t3)
    # and 2 datapoints for the other three bands (t2,t3).
    # Source 2 has two datapoints for band3 (t2,t3) and
    # one for the other three bands (t3).
    query = """\
    SELECT a.runcat
          ,a.xtrsrc
          ,a.type
          ,i.band
          ,i.taustart_ts
      FROM assocxtrsource a
          ,extractedsource x
          ,image i
     WHERE a.xtrsrc = x.id
       AND x.image = i.id
       AND i.dataset = %(dataset_id)s
    ORDER BY a.runcat
            ,i.band
            ,i.taustart_ts
    """
    cursor = tkp.db.execute(query, {'dataset_id': dataset.id})
    result = cursor.fetchall()
    # 9 + 5 entries for source 1 and 2 respectively.
    self.assertEqual(len(result), 14)

    # The individual light-curve datapoints.
    # Source 1: new at t1, band0
    self.assertEqual(result[0][2], 4)
    self.assertEqual(result[0][4], taustart_tss[0])
    # Source 1: forced fit at t2, same band
    self.assertEqual(result[1][2], 7)
    self.assertEqual(result[1][3], result[0][3])
    self.assertEqual(result[1][4], taustart_tss[1])
    # Source 1: forced fit at t3, same band
    self.assertEqual(result[2][2], 7)
    self.assertEqual(result[2][3], result[1][3])
    self.assertEqual(result[2][4], taustart_tss[2])
    # Source 1: forced fit at t2, band1
    self.assertEqual(result[3][2], 7)
    self.assertTrue(result[3][3] > result[2][3])
    self.assertEqual(result[3][4], taustart_tss[1])
    # Source 1: forced fit at t3, band1
    self.assertEqual(result[4][2], 7)
    self.assertEqual(result[4][3], result[3][3])
    self.assertEqual(result[4][4], taustart_tss[2])
    # Source 1: forced fit at t2, band2
    self.assertEqual(result[5][2], 7)
    self.assertTrue(result[5][3] > result[4][3])
    self.assertEqual(result[5][4], taustart_tss[1])
    # Source 1: forced fit at t3, band2
    self.assertEqual(result[6][2], 7)
    self.assertEqual(result[6][3], result[5][3])
    self.assertEqual(result[6][4], taustart_tss[2])
    # Source 1: forced fit at t2, band3
    self.assertEqual(result[7][2], 7)
    self.assertTrue(result[7][3] > result[6][3])
    self.assertEqual(result[7][4], taustart_tss[1])
    # Source 1: forced fit at t3, band3
    self.assertEqual(result[8][2], 7)
    self.assertEqual(result[8][3], result[7][3])
    self.assertEqual(result[8][4], taustart_tss[2])
    # Source 2: forced fit at t3, band0
    self.assertEqual(result[9][2], 7)
    self.assertEqual(result[9][3], result[0][3])
    self.assertEqual(result[9][4], taustart_tss[2])
    # Source 2: forced fit at t3, band1
    self.assertEqual(result[10][2], 7)
    self.assertTrue(result[10][3] > result[9][3])
    self.assertEqual(result[10][4], taustart_tss[2])
    # Source 2: forced fit at t3, band2
    self.assertEqual(result[11][2], 7)
    self.assertTrue(result[11][3] > result[10][3])
    self.assertEqual(result[11][4], taustart_tss[2])
    # Source 2: new at t2, band3
    self.assertEqual(result[12][2], 4)
    self.assertTrue(result[12][3] > result[11][3])
    self.assertEqual(result[12][4], taustart_tss[1])
    # Source 2: forced fit at t3, band3
    self.assertEqual(result[13][2], 7)
    self.assertEqual(result[13][3], result[12][3])
    self.assertEqual(result[13][4], taustart_tss[2])
def setUp(self):
    data = {'description': "monitoringlist:" + self._testMethodName}
    self.dataset = DataSet(data=data)
    self.im_params = db_subs.generate_timespaced_dbimages_data(n_images=3)
def setUp(self):
    self.database = tkp.db.Database()
    self.dataset = DataSet(data={'description': 'dataset with images'},
                           database=self.database)