def test_create(self):
    """Sanity-check ORM creation of Image objects attached to a DataSet."""
    image_data = {
        'freq_eff': 80e6,
        'freq_bw': 1e6,
        'taustart_ts': datetime.datetime(1999, 9, 9),
        'url': '/path/to/image',
        'tau_time': 0,
        'beam_smaj_pix': float(2.7),
        'beam_smin_pix': float(2.3),
        'beam_pa_rad': float(1.7),
        'deltax': float(-0.01111),
        'deltay': float(0.01111),
        'centre_ra': 0,
        'centre_decl': 0,
        'xtr_radius': 3,
    }
    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    # Images are automatically added to their dataset on construction.
    image1 = Image(dataset=dataset1, data=image_data)
    self.assertEqual(image1.tau_time, 0)
    self.assertAlmostEqual(image1.freq_eff, 80e6)
    image2 = Image(dataset=dataset1, data=image_data)
    # A second DataSet built from the same id refers to the same database
    # rows; its .images would be newly-created Python objects with
    # different identities, so we cannot compare the object sets directly.
    dataset2 = DataSet(database=self.database, id=dataset1.id)
    # Update (without changing anything) and re-check the cached value.
    image1.update()
    self.assertEqual(image1.tau_time, 0)
def test_max_bandwidth(self):
    """
    Test if setting max bandwidth correctly affects the band of images.
    """
    data = copy(self.image_data)
    dataset_data = {'description': self._testMethodName}
    dataset = DataSet(data=dataset_data, database=self.database)

    data['freq_eff'] = 50e6      # 50 MHz
    data['freq_bw'] = 2e6        # 2 MHz
    data['freq_bw_max'] = 0.0    # no band association limiting
    first_image = Image(dataset=dataset, data=data)

    # This image should be assigned the same band, since it lies within
    # the (unlimited) bandwidth of the first image.
    data['freq_eff'] = 51e6      # 51 MHz
    data['freq_bw'] = 2e6        # 2 MHz
    data['freq_bw_max'] = 0.0    # no band association limiting
    associated_image = Image(dataset=dataset, data=data)
    self.assertEqual(get_band_for_image(first_image),
                     get_band_for_image(associated_image))

    # This image should *not* be assigned the same band, since the
    # bandwidth used for association is limited.
    data['freq_eff'] = 47e6      # 47 MHz
    data['freq_bw'] = 5e6        # 5 MHz
    data['freq_bw_max'] = 0.5e5  # limit association bandwidth to 50 kHz
    associated_image = Image(dataset=dataset, data=data)
    self.assertNotEqual(get_band_for_image(first_image),
                        get_band_for_image(associated_image))
def test_frequency_range(self):
    """
    Determine range of frequencies supported by DB schema.
    """
    def get_freq_for_image(image):
        # Returns the stored frequency corresponding to a particular image.
        return tkp.db.execute(
            """
            SELECT freq_central
            FROM image
                ,frequencyband
            WHERE image.id = %(id)s
              AND image.band = frequencyband.id
            """, {"id": image.id}).fetchone()[0]

    dataset = DataSet(data={'description': self._testMethodName},
                      database=self.database)
    data = copy(self.image_data)

    data['freq_eff'] = 1e6   # 1 MHz
    data['freq_bw'] = 1e3    # 1 kHz
    mhz_freq_image = Image(dataset=dataset, data=data)
    self.assertEqual(data['freq_eff'], get_freq_for_image(mhz_freq_image))

    data['freq_eff'] = 100e9  # 100 GHz (e.g. CARMA)
    data['freq_bw'] = 5e9     # 5 GHz
    ghz_freq_image = Image(dataset=dataset, data=data)
    self.assertEqual(data['freq_eff'], get_freq_for_image(ghz_freq_image))

    data['freq_eff'] = 5e15  # 5 PHz (e.g. UV obs)
    data['freq_bw'] = 1e14   # 100 THz (comment previously mislabelled "5GHz")
    phz_freq_image = Image(dataset=dataset, data=data)
    self.assertEqual(data['freq_eff'], get_freq_for_image(phz_freq_image))
def test_infinite(self):
    """Database insertion must not choke on infinite errors."""
    dataset = DataSet(data={'description': 'example dataset'},
                      database=self.database)
    image = Image(dataset=dataset,
                  data=db_subs.example_dbimage_data_dict())

    # A standard example extractedsource inserts cleanly.
    image.insert_extracted_sources([db_subs.example_extractedsource_tuple()])
    inserted = columns_from_table('extractedsource',
                                  where={'image': image.id})
    self.assertEqual(len(inserted), 1)

    # A source with infinite errors is dropped, and a warning is logged.
    bad_source = db_subs.example_extractedsource_tuple(
        error_radius=float('inf'),
        peak_err=float('inf'),
        flux_err=float('inf'))
    # Attach a temporary handler to the root logger, capturing all log
    # output in a buffer.
    # NOTE(review): BytesIO only matches Python 2's str-based logging;
    # Python 3's StreamHandler writes unicode str — confirm interpreter.
    log_buffer = BytesIO()
    handler = logging.StreamHandler(log_buffer)
    logging.getLogger().addHandler(handler)
    image.insert_extracted_sources([bad_source])
    logging.getLogger().removeHandler(handler)
    # The drop must have been logged appropriately...
    self.assertIn("Dropped source fit with infinite flux errors",
                  log_buffer.getvalue())
    # ...and no extra row inserted.
    inserted = columns_from_table('extractedsource',
                                  where={'image': image.id})
    self.assertEqual(len(inserted), 1)
def test_create2(self):
    """Check ExtractedSource creation and the image/source relationship."""
    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    self.assertEqual(dataset1.images, set())
    image_data = db_subs.example_dbimage_data_dict()
    image1 = Image(dataset=dataset1, data=image_data)
    image2 = Image(dataset=dataset1, data=image_data)

    extractedsource_data = {
        'ra': 123.123, 'decl': 23.23,
        'ra_err': 21.1, 'decl_err': 21.09,
        'ra_fit_err': 0.1, 'decl_fit_err': 0.1,
        'uncertainty_ew': 0.1, 'uncertainty_ns': 0.1,
        'zone': 1,
        'x': 0.11, 'y': 0.22, 'z': 0.33,
        'racosdecl': 0.44,
        'det_sigma': 10.0,
        'ew_sys_err': 20, 'ns_sys_err': 20,
        'error_radius': 10.0,
    }
    source1 = ExtractedSource(image=image1, data=extractedsource_data)
    extractedsource_data['ra'] = 45.45
    extractedsource_data['decl'] = 55.55
    source2 = ExtractedSource(image=image1, data=extractedsource_data)
    self.assertEqual(len(image1.sources), 2)

    # Source #3 points to the same source as #2...
    source3 = ExtractedSource(id=source2.id, database=self.database)
    # ...which means there are no extra sources for image1.
    self.assertEqual(len(image1.sources), 2)

    # If, however, we create a new source with an image reference in the
    # constructor, we get a 'deep' copy:
    source4 = ExtractedSource(image=image1, id=source2.id)
    # Now there's a new source for image1!
    self.assertEqual(len(image1.sources), 3)
    # But if we filter on the source ids, we see there are really only
    # two distinct sources.
    # (A dead duplicate of this srcids assignment was removed — the
    # first computation was unused and immediately shadowed.)
    srcids = set([source.id for source in image1.sources])
    self.assertEqual(set([source1.id, source2.id]), srcids)

    extractedsource_data['ra'] = 89.89
    extractedsource_data['decl'] = 78.78
    source5 = ExtractedSource(image=image2, data=extractedsource_data)
    self.assertEqual(len(image2.sources), 1)
    self.assertEqual(image2.sources.pop().id, source5.id)
def test_infinite(self):
    """Database insertion must not choke on infinite errors."""
    dataset = DataSet(data={'description': 'example dataset'},
                      database=self.database)
    image = Image(dataset=dataset,
                  data=db_subs.example_dbimage_datasets(1)[0])
    # Inserting an example extractedsource should be fine...
    image.insert_extracted_sources([db_subs.example_extractedsource_tuple()])
    # ...and likewise when the source carries infinite errors.
    infinite_err_source = db_subs.example_extractedsource_tuple(
        error_radius=float('inf'))
    image.insert_extracted_sources([infinite_err_source])
def test_frequency_difference(self):
    """
    Images at almost the same frequency should fall nicely into the same
    band, unless they fall over a band boundary. See #4801.

    Bands are 1 MHz wide and centred on the MHz.
    """
    def get_band_for_image(image):
        # Returns the band number corresponding to a particular image.
        return tkp.db.execute(
            """
            SELECT band
            FROM image
            WHERE image.id = %(id)s
            """, {"id": image.id}).fetchone()[0]

    data = copy(self.image_data)
    dataset1 = DataSet(data={'description': self._testMethodName},
                       database=self.database)

    # Example frequencies/bandwidths supplied in bug report.
    data['freq_eff'] = 124021911.62109375
    data['freq_bw'] = 1757812.5

    # image1 and image2 are exactly the same, so they must share a band.
    image1 = Image(dataset=dataset1, data=data)
    image2 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image2))

    # Another image at a frequency 1 MHz away should land in a
    # different band...
    data['freq_eff'] = data['freq_eff'] - 1e6
    image3 = Image(dataset=dataset1, data=data)
    self.assertNotEqual(get_band_for_image(image1),
                        get_band_for_image(image3))

    # ...even if it has a huge bandwidth.
    data['freq_bw'] *= 100
    image4 = Image(dataset=dataset1, data=data)
    self.assertNotEqual(get_band_for_image(image1),
                        get_band_for_image(image4))

    # Finally, this image should be in the same band, since it's at an
    # only slightly different frequency.
    data['freq_eff'] = 123924255.37109375
    data['freq_bw'] = 1953125.0
    image5 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image5))
def test_create(self):
    """Images created via the ORM expose their stored metadata."""
    image_data_dicts = db_subs.generate_timespaced_dbimages_data(n_images=2)
    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    first_data, second_data = image_data_dicts
    # Images are automatically added to their dataset on construction.
    image1 = Image(dataset=dataset1, data=first_data)
    self.assertEqual(image1.tau_time, first_data['tau_time'])
    self.assertAlmostEqual(image1.freq_eff, first_data['freq_eff'])
    image2 = Image(dataset=dataset1, data=second_data)
    # A second DataSet built from the same id maps onto the same database
    # rows; its .images would be fresh Python objects, so object-set
    # comparisons are not meaningful here.
    dataset2 = DataSet(database=self.database, id=dataset1.id)
def test_update(self):
    """ORM updates on ExtractedSource propagate only via update()."""
    image_data = db_subs.example_dbimage_data_dict()
    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    self.assertEqual(dataset1.images, set())
    image1 = Image(dataset=dataset1, data=image_data)
    image2 = Image(dataset=dataset1, data=image_data)
    extractedsource_data = {
        'ra': 123.123, 'decl': 23.23,
        'ra_err': 21.1, 'decl_err': 21.09,
        'ra_fit_err': 0.1, 'decl_fit_err': 0.1,
        'uncertainty_ew': 0.1, 'uncertainty_ns': 0.1,
        'zone': 1,
        'x': 0.11, 'y': 0.22, 'z': 0.33,
        'racosdecl': 0.44,
        'det_sigma': 11.1,
        'ew_sys_err': 20, 'ns_sys_err': 20,
        'error_radius': 10.0,
    }
    source1 = ExtractedSource(image=image1, data=extractedsource_data)
    extractedsource_data['ra'] = 45.45
    extractedsource_data['decl'] = 55.55
    source2 = ExtractedSource(image=image1, data=extractedsource_data)
    self.assertEqual(len(image1.sources), 2)

    # Source #3 points to the same database row as #2.
    source3 = ExtractedSource(id=source2.id, database=self.database)
    # Update through source3...
    source3.update(decl=44.44)
    # ...but the cached values of #1 and #2 are unchanged.
    self.assertAlmostEqual(source1.decl, 23.23)
    self.assertAlmostEqual(source2.decl, 55.55)
    self.assertAlmostEqual(source3.decl, 44.44)

    source1.update()  # nothing changes for #1
    self.assertAlmostEqual(source1.decl, 23.23)
    self.assertAlmostEqual(source2.decl, 55.55)
    self.assertAlmostEqual(source3.decl, 44.44)

    # Now we make sure source #2 takes note of the change done through #3.
    source2.update()
    self.assertAlmostEqual(source1.decl, 23.23)
    # The original had a duplicated copy-paste of the source1 assertion
    # here; check source3 (still 44.44) instead.
    self.assertAlmostEqual(source3.decl, 44.44)
    self.assertAlmostEqual(source2.decl, 44.44)
def store_images(images_metadata, extraction_radius_pix, dataset_id):
    """ Add images to database.

    Note that all images in one dataset should be inserted in one go,
    since the order is very important here. If you don't add them all in
    once, you should make sure they are added in the correct order e.g.
    sorted by observation time.

    Note: Should only be used in a master recipe

    Args:
        images_metadata: list of dicts containing image metadata
        extraction_radius_pix: (float) Used to calculate the 'skyregion'
        dataset_id: dataset id to be used. don't use value from parset file
                    since this can be -1 (TraP way of setting auto increment)
    Returns:
        list of database IDs of the stored images
    """
    database = Database()
    dataset = DataSet(id=dataset_id, database=database)
    image_ids = []

    # Sort images by timestamp: insertion order matters for association.
    images_metadata.sort(key=lambda m: m['taustart_ts'])

    for metadata in images_metadata:
        # The extraction radius is stored in degrees, scaled by the
        # image's pixel size.
        metadata['xtr_radius'] = extraction_radius_pix * abs(
            metadata['deltax'])
        filename = metadata['url']
        db_image = Image(data=metadata, dataset=dataset)
        image_ids.append(db_image.id)
        # Lazy %-style logging args: formatting is deferred until the
        # record is actually emitted.
        logger.info("stored %s with ID %s",
                    os.path.basename(filename), db_image.id)
    return image_ids
def test_infinite(self):
    """Database insertion must not choke on infinite beam parameters."""
    test_specific_image_data = {
        'beam_smaj_pix': float('inf'),
        'beam_smin_pix': float('inf'),
        'beam_pa_rad': float('inf'),
    }
    image_data = db_subs.generate_timespaced_dbimages_data(
        n_images=1, **test_specific_image_data)[0]

    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    image1 = Image(dataset=dataset1, data=image_data)

    bmaj, bmin, bpa = tkp.db.execute(
        """
        SELECT rb_smaj, rb_smin, rb_pa
        FROM image
        WHERE image.id = %(id)s
        """, {"id": image1.id}).fetchone()
    # All three restoring-beam columns should round-trip as +inf.
    for stored_value in (bmaj, bmin, bpa):
        self.assertEqual(stored_value, float('inf'))
def test_frequency_range(self):
    """
    Determine range of frequencies supported by DB schema.
    """
    dataset = DataSet(data={'description': self._testMethodName},
                      database=self.database)
    data = copy(self.image_data)

    # (effective frequency, bandwidth) pairs spanning MHz up to PHz.
    frequency_cases = (
        (1e6, 1e3),     # 1 MHz / 1 kHz
        (100e9, 5e9),   # 100 GHz (e.g. CARMA) / 5 GHz
        (5e15, 1e14),   # 5 PHz (e.g. UV obs) / 100 THz
    )
    for freq_eff, freq_bw in frequency_cases:
        data['freq_eff'] = freq_eff
        data['freq_bw'] = freq_bw
        stored_image = Image(dataset=dataset, data=data)
        # The stored central frequency must match what we inserted.
        self.assertEqual(freq_eff, get_freq_for_image(stored_image))
def test_frequency_difference(self):
    """
    Images at almost the same frequency should fall nicely into the same
    band, unless they fall over a band boundary. See #4801.

    Bands are 1 MHz wide and centred on the MHz.
    """
    data = copy(self.image_data)
    dataset1 = DataSet(data={'description': self._testMethodName},
                       database=self.database)

    # Example frequencies/bandwidths supplied in bug report.
    data['freq_eff'] = 124021911.62109375
    data['freq_bw'] = 1757812.5

    # Two identical images must share a band.
    image1 = Image(dataset=dataset1, data=data)
    image2 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image2))

    # An image 10 MHz away should land in a different band...
    data['freq_eff'] -= 1e7
    image3 = Image(dataset=dataset1, data=data)
    self.assertNotEqual(get_band_for_image(image1),
                        get_band_for_image(image3))

    # ...but in the same band if the bandwidths overlap.
    data['freq_bw'] *= 100
    image4 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image4))

    # Finally, an image at an only slightly different frequency keeps
    # the same band.
    data['freq_eff'] = 123924255.37109375
    data['freq_bw'] = 1953125.0
    image5 = Image(dataset=dataset1, data=data)
    self.assertEqual(get_band_for_image(image1), get_band_for_image(image5))
def test_null_case_sequential(self):
    """test_null_case_sequential

    - Check extractedsource insertion routines can deal with empty input!
    - Check source association can too
    """
    for im in self.im_params:
        db_img = Image(data=im, dataset=self.dataset)
        self.db_imgs.append(db_img)
        # Insert an empty source list, then run association on it.
        db_img.insert_extracted_sources([], 'blind')
        db_img.associate_extracted_sources(deRuiter_r,
                                           new_source_sigma_margin)
        # No sources were inserted, so nothing should reach the
        # running catalog.
        running_cat = columns_from_table(
            table="runningcatalog",
            keywords="*",
            where={"dataset": self.dataset.id})
        self.assertEqual(len(running_cat), 0)
def test_create(self):
    """Exercise the various ExtractedSource construction paths."""
    dataset = DataSet(data={'description': 'dataset with images'},
                      database=self.database)
    # create 4 images, separated by one day each
    image = Image(dataset=dataset,
                  data=db_subs.example_dbimage_data_dict())

    extracted_source_data = dict(
        zone=13,
        ra=12.12, decl=13.13,
        ra_err=21.1, decl_err=21.09,
        ra_fit_err=1.12, decl_fit_err=1.23,
        uncertainty_ew=0.1, uncertainty_ns=0.1,
        ew_sys_err=20, ns_sys_err=20,
        error_radius=10.0,
        x=0.11, y=0.22, z=0.33,
        racosdecl=0.44,
        det_sigma=10.)

    # Two constructions against the same image yield two distinct rows.
    src1 = ExtractedSource(data=extracted_source_data, image=image)
    src2 = ExtractedSource(data=extracted_source_data, image=image,
                           database=self.database)
    self.assertNotEqual(src1.id, src2.id)

    # The image may also be referenced by id inside the data dict.
    extracted_source_data['image'] = image.id
    src3 = ExtractedSource(data=extracted_source_data,
                           database=self.database)

    # Passing an explicit id loads the existing row: the changed 'ra' in
    # the data dict does not overwrite the stored value.
    extracted_source_data['ra'] = 23.23
    src4 = ExtractedSource(data=extracted_source_data,
                           database=self.database, id=src1.id)
    self.assertEqual(src1.id, src4.id)
    self.assertAlmostEqual(src1.ra, src4.ra)

    # A missing required key raises AttributeError.
    del extracted_source_data['x']
    self.assertRaisesRegexp(AttributeError, "missing required data key: x",
                            ExtractedSource,
                            data=extracted_source_data,
                            database=self.database)
def test_infinite(self):
    """Database insertion must not choke on infinite errors."""
    dataset = DataSet(data={'description': 'example dataset'},
                      database=self.database)
    image = Image(dataset=dataset,
                  data=db_subs.example_dbimage_data_dict())

    # A standard example extractedsource inserts cleanly.
    insert_extracted_sources(image._id,
                             [db_subs.example_extractedsource_tuple()])
    inserted = columns_from_table('extractedsource',
                                  where={'image': image.id})
    self.assertEqual(len(inserted), 1)

    # A source with infinite errors is dropped, with a logged warning.
    bad_source = db_subs.example_extractedsource_tuple(
        error_radius=float('inf'),
        peak_err=float('inf'),
        flux_err=float('inf'))
    # Capture all root-logger output in a buffer while inserting.
    # NOTE(review): BytesIO matches Python 2's str-based logging;
    # Python 3's StreamHandler writes unicode str — confirm interpreter.
    log_buffer = BytesIO()
    handler = logging.StreamHandler(log_buffer)
    logging.getLogger().addHandler(handler)
    insert_extracted_sources(image._id, [bad_source])
    logging.getLogger().removeHandler(handler)
    # The drop must have been logged appropriately...
    self.assertIn("Dropped source fit with infinite flux errors",
                  log_buffer.getvalue())
    # ...and no extra row inserted.
    inserted = columns_from_table('extractedsource',
                                  where={'image': image.id})
    self.assertEqual(len(inserted), 1)
def test_lightsurface(self): images = [] # make 4 * 5 images with different frequencies and date for frequency in [80e6, 90e6, 100e6, 110e6, 120e6]: for day in [3, 4, 5, 6]: data = {'taustart_ts': datetime.datetime(2010, 3, day), 'tau_time': 3600, 'url': '/', 'freq_eff': frequency, 'freq_bw': 1e6, 'beam_smaj_pix': float(2.7), 'beam_smin_pix': float(2.3), 'beam_pa_rad': float(1.7), 'deltax': float(-0.01111), 'deltay': float(0.01111), 'centre_ra': 111, 'centre_decl': 11, 'xtr_radius' : 3 } image = Image(dataset=self.dataset, data=data) images.append(image) # 3 sources per image, with different coordinates & flux data_list = [] for i in range(1, 4): data_list.append({ 'ra': 111.111 + i, 'decl': 11.11 + i, 'ra_fit_err': 0.01, 'decl_fit_err': 0.01, 'ew_sys_err': 20, 'ns_sys_err': 20, 'i_peak': 10*i, 'i_peak_err': 0.1, 'error_radius': 10.0 # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i }) # Insert the 3 sources in each image, while further varying the flux for i, image in enumerate(images): # Create the "source finding results" sources = [] for data in data_list: source = (data['ra'], data['decl'], data['ra_fit_err'], data['decl_fit_err'], data['i_peak']*(1+i), data['i_peak_err'], data['i_peak']*(1+i), data['i_peak_err'], 10., # Significance level 1, 1, 0, # Beam params (width arcsec major, width arcsec minor, parallactic angle) data['ew_sys_err'], data['ns_sys_err'], # Systematic errors data['error_radius']) sources.append(source) # Insert the sources image.insert_extracted_sources(sources) # Run the association for each list of source for an image image.associate_extracted_sources(deRuiter_r=3.7) # updates the dataset and its set of images self.dataset.update() self.dataset.update_images() # update the images and their sets of sources for image in self.dataset.images: image.update() image.update_sources() # Now pick any image, select the first source (smallest RA) # and extract its light curve # TODO: aaarch this is so ugly. Because this a set we need to pop it. 
sources = self.dataset.images.pop().sources #sources = self.dataset.images[-1].sources sources = sorted(sources, key=attrgetter('ra')) extracted_source = sources[0].id lightcurve = tkp.db.general.lightcurve(extracted_source)
def test_update(self):
    """
    Check that ORM-updates work for the Image class.
    """
    dataset1 = DataSet(
        data={'description': 'dataset with changing images'},
        database=self.database)
    image_data = db_subs.example_dbimage_data_dict()
    image1 = Image(dataset=dataset1, data=image_data)

    # Sanity check: an update without changes leaves values alone.
    image1.update()
    self.assertEqual(image1.tau_time, image_data['tau_time'])

    # A sequence of distinct tau_time values, plus a changed freq_eff.
    base_tau = image_data['tau_time']
    tau_times = [base_tau + 100, base_tau + 400,
                 base_tau + 700, base_tau + 1000]
    new_freq_eff = image_data['freq_eff'] * 1.2

    image1.update(tau_time=tau_times[0])
    self.assertEqual(image1.tau_time, tau_times[0])

    # New 'image' orm-object, created from the database id of image1.
    image2 = Image(dataset=dataset1, id=image1.id)
    self.assertAlmostEqual(image2.tau_time, tau_times[0])
    self.assertAlmostEqual(image2.freq_eff, image_data['freq_eff'])

    # Same id, so changing image2 changes image1 — but *only* after
    # image1 itself calls update().
    image2.update(tau_time=tau_times[1])
    self.assertAlmostEqual(image1.tau_time, tau_times[0])
    image1.update()
    self.assertAlmostEqual(image1.tau_time, tau_times[1])
    image1.update(tau_time=tau_times[2])
    image2.update()
    self.assertAlmostEqual(image2.tau_time, tau_times[2])

    # Multiple keywords in a single update call:
    image1.update(tau_time=tau_times[3], freq_eff=new_freq_eff)
    self.assertAlmostEqual(image1.tau_time, tau_times[3])
    self.assertAlmostEqual(image1.freq_eff, new_freq_eff)

    # And datetime conversion roundtrips work correctly:
    dtime0 = image_data['taustart_ts']
    dtime1 = dtime0 + datetime.timedelta(days=3)
    self.assertEqual(image1.taustart_ts, dtime0)
    self.assertEqual(image2.taustart_ts, dtime0)
    image2.update(taustart_ts=dtime1)
    # image1 orm-object not yet updated, still caches the old value:
    self.assertEqual(image1.taustart_ts, dtime0)
    self.assertEqual(image2.taustart_ts, dtime1)
    image1.update()
    self.assertEqual(image1.taustart_ts, dtime1)
def test_only_first_epoch_source(self):
    """test_only_first_epoch_source

    - Pretend to extract a source only from the first image.
    - Run source association for each image, as we would in TraP.
    - Check the image source listing works
    - Check runcat and assocxtrsource are correct.
    """
    first_epoch = True
    extracted_source_ids = []
    for im in self.im_params:
        self.db_imgs.append(Image(data=im, dataset=self.dataset))
        last_img = self.db_imgs[-1]

        # Only the very first image contributes an extracted source;
        # later images are associated with an empty extraction.
        if first_epoch:
            last_img.insert_extracted_sources(
                [db_subs.example_extractedsource_tuple()], 'blind')

        last_img.associate_extracted_sources(deRuiter_r,
                                             new_source_sigma_margin)

        # First, check the runcat has been updated correctly:
        # one entry, still with a single datapoint, regardless of how
        # many (empty) images have been processed since.
        running_cat = columns_from_table(
            table="runningcatalog",
            keywords=['datapoints'],
            where={"dataset": self.dataset.id})
        self.assertEqual(len(running_cat), 1)
        self.assertEqual(running_cat[0]['datapoints'], 1)

        last_img.update()
        last_img.update_sources()
        img_xtrsrc_ids = [src.id for src in last_img.sources]
        # print "ImageID:", last_img.id
        # print "Imgs sources:", img_xtrsrc_ids
        if first_epoch:
            # The first image lists exactly the one extracted source,
            # and it has exactly one association entry.
            self.assertEqual(len(img_xtrsrc_ids), 1)
            extracted_source_ids.extend(img_xtrsrc_ids)
            assocxtrsrcs_rows = columns_from_table(
                table="assocxtrsource",
                keywords=['runcat', 'xtrsrc'],
                where={"xtrsrc": img_xtrsrc_ids[0]})
            self.assertEqual(len(assocxtrsrcs_rows), 1)
            self.assertEqual(assocxtrsrcs_rows[0]['xtrsrc'],
                             img_xtrsrc_ids[0])
        else:
            # Later images have no extracted sources at all.
            self.assertEqual(len(img_xtrsrc_ids), 0)

        first_epoch = False

    #Assocxtrsources still ok after multiple images?
    self.assertEqual(len(extracted_source_ids), 1)
    assocxtrsrcs_rows = columns_from_table(
        table="assocxtrsource",
        keywords=['runcat', 'xtrsrc'],
        where={"xtrsrc": extracted_source_ids[0]})
    self.assertEqual(len(assocxtrsrcs_rows), 1)
    self.assertEqual(
        assocxtrsrcs_rows[0]['xtrsrc'], extracted_source_ids[0],
        "Runcat xtrsrc entry must match the only extracted source")
def test_lightsurface(self): images = [] # make 4 * 5 images with different frequencies and date for frequency in [80e6, 90e6, 100e6, 110e6, 120e6]: for day in [3, 4, 5, 6]: img_data = db_subs.example_dbimage_data_dict( taustart_ts=datetime.datetime(2010, 3, day), freq_eff=frequency) image = Image(dataset=self.dataset, data=img_data) images.append(image) # 3 sources per image, with different coordinates & flux data_list = [] for i in range(1, 4): data_list.append({ 'ra': 111.111 + i, 'decl': 11.11 + i, 'ra_fit_err': 0.01, 'decl_fit_err': 0.01, 'i_peak': 10. * i, 'i_peak_err': 0.1, 'error_radius': 10.0, 'fit_type': 1, # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i }) # Insert the 3 sources in each image, while further varying the flux for i, image in enumerate(images): # Create the "source finding results" sources = [] for data in data_list: source = db_subs.example_extractedsource_tuple( ra=data['ra'], dec=data['decl'], ra_fit_err=data['ra_fit_err'], dec_fit_err=data['decl_fit_err'], peak=data['i_peak'] * (1 + i), peak_err=data['i_peak_err'], flux=data['i_peak'] * (1 + i), flux_err=data['i_peak_err'], fit_type=data['fit_type']) sources.append(source) # Insert the sources insert_extracted_sources(image._id, sources) # Run the association for each list of source for an image associate_extracted_sources(image._id, deRuiter_r=3.7, new_source_sigma_margin=3) # updates the dataset and its set of images self.dataset.update() self.dataset.update_images() # update the images and their sets of sources for image in self.dataset.images: image.update() image.update_sources() # Now pick any image, select the first source (smallest RA) # and extract its light curve # TODO: aaarch this is so ugly. Because this a set we need to pop it. sources = self.dataset.images.pop().sources #sources = self.dataset.images[-1].sources sources = sorted(sources, key=attrgetter('ra')) extracted_source = sources[0].id lightcurve = tkp.db.general.lightcurve(extracted_source)
def test_update(self):
    """
    Check that ORM-updates work for the Image class.
    """
    dataset1 = DataSet(data={'description': 'dataset with changing images'},
                       database=self.database)
    image_data = db_subs.example_dbimage_data_dict()
    image1 = Image(dataset=dataset1, data=image_data)

    # Sanity check: updating without changing anything is a no-op.
    image1.update()
    self.assertEqual(image1.tau_time, image_data['tau_time'])

    # This time, change something:
    t1 = image_data['tau_time'] + 100
    t2, t3, t4 = t1 + 300, t1 + 600, t1 + 900
    f1 = image_data['freq_eff'] * 1.2

    image1.update(tau_time=t1)
    self.assertEqual(image1.tau_time, t1)

    # A second orm-object created from image1's database id.
    image2 = Image(dataset=dataset1, id=image1.id)
    self.assertAlmostEqual(image2.tau_time, t1)
    self.assertAlmostEqual(image2.freq_eff, image_data['freq_eff'])

    # Same id, so a change made via image2 reaches image1 — but *only*
    # after image1 calls update() itself.
    image2.update(tau_time=t2)
    self.assertAlmostEqual(image1.tau_time, t1)
    image1.update()
    self.assertAlmostEqual(image1.tau_time, t2)
    image1.update(tau_time=t3)
    image2.update()
    self.assertAlmostEqual(image2.tau_time, t3)

    # Multiple keyword arguments at once:
    image1.update(tau_time=t4, freq_eff=f1)
    self.assertAlmostEqual(image1.tau_time, t4)
    self.assertAlmostEqual(image1.freq_eff, f1)

    # Datetime conversion roundtrips:
    start0 = image_data['taustart_ts']
    start1 = start0 + datetime.timedelta(days=3)
    self.assertEqual(image1.taustart_ts, start0)
    self.assertEqual(image2.taustart_ts, start0)
    image2.update(taustart_ts=start1)
    # image1 still caches the old value until it updates:
    self.assertEqual(image1.taustart_ts, start0)
    self.assertEqual(image2.taustart_ts, start1)
    image1.update()
    self.assertEqual(image1.taustart_ts, start1)
def test_lightcurve(self):
    """
    Insert the same three sources into four consecutive daily images,
    associate them, then check the extracted light curve and the
    variability indices.
    """
    # Make 4 images with different dates, one day apart.
    images = []
    for day in [3, 4, 5, 6]:
        data = {'taustart_ts': datetime.datetime(2010, 3, day),
                'tau_time': 3600,
                'url': '/',
                'freq_eff': 80e6,
                'freq_bw': 1e6,
                'beam_smaj_pix': float(2.7),
                'beam_smin_pix': float(2.3),
                'beam_pa_rad': float(1.7),
                'deltax': float(-0.01111),
                'deltay': float(0.01111),
                'centre_ra': 111,
                'centre_decl': 11,
                'xtr_radius': 3
                }
        image = Image(dataset=self.dataset, data=data)
        images.append(image)

    # 3 sources per image, with different coordinates & flux
    data_list = []
    for i in range(1, 4):
        data_list.append({
            'ra': 111.11 + i,
            'decl': 11.11 + i,
            'ra_fit_err': 0.01,
            'decl_fit_err': 0.01,
            'ra_sys_err': 20,
            'decl_sys_err': 20,
            'i_peak': 10 * i,
            'i_peak_err': 0.1
            # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
        })

    # Insert the 3 sources in each image, while further varying the flux
    # with the image index i.
    for i, image in enumerate(images):
        # Create the "source finding results"
        # Note that we reuse 'i_peak' as both peak & integrated flux.
        sources = []
        for data in data_list:
            source = (data['ra'], data['decl'],
                      data['ra_fit_err'], data['decl_fit_err'],  # Gaussian fit errors
                      data['i_peak'] * (1 + i), data['i_peak_err'],  # Peak
                      data['i_peak'] * (1 + i), data['i_peak_err'],  # Integrated
                      10.,  # Significance level
                      1, 1, 0,  # Beam params (width arcsec major, width arcsec minor, parallactic angle)
                      data['ra_sys_err'], data['decl_sys_err'])  # Systematic errors
            sources.append(source)

        # Insert the sources
        image.insert_extracted_sources(sources)
        # Run the association for each list of source for an image
        image.associate_extracted_sources(deRuiter_r=3.7)

    # updates the dataset and its set of images
    self.dataset.update()
    self.dataset.update_images()

    # update the images and their sets of sources
    for image in self.dataset.images:
        image.update()
        image.update_sources()

    # Now pick any image, select the first source (smallest RA)
    # and extract its light curve
    sources = self.dataset.images.pop().sources
    sources = sorted(sources, key=attrgetter('ra'))
    lightcurve = sources[0].lightcurve()

    # check if the sources are associated in all images
    self.assertEqual(len(images), len(lightcurve))
    # One datapoint per day, in observation order...
    self.assertEqual(lightcurve[0][0], datetime.datetime(2010, 3, 3, 0, 0))
    self.assertEqual(lightcurve[1][0], datetime.datetime(2010, 3, 4, 0, 0))
    self.assertEqual(lightcurve[2][0], datetime.datetime(2010, 3, 5, 0, 0))
    self.assertEqual(lightcurve[3][0], datetime.datetime(2010, 3, 6, 0, 0))
    # ...with the flux scaling linearly with the image index.
    self.assertAlmostEqual(lightcurve[0][2], 10.)
    self.assertAlmostEqual(lightcurve[1][2], 20.)
    self.assertAlmostEqual(lightcurve[2][2], 30.)
    self.assertAlmostEqual(lightcurve[3][2], 40.)

    # Since the light curves are very similar, only eta_nu is different
    # NOTE(review): indexing self.dataset.images with [-1] — other tests
    # in this file treat .images as a set; confirm it is ordered here.
    results = dbtransients._select_updated_variability_indices(
        self.dataset.images[-1].id)
    results = sorted(results, key=itemgetter('eta_int'))
    for result, eta_nu in zip(results, (16666.66666667,
                                        66666.666666667,
                                        150000.0)):
        self.assertEqual(result['f_datapoints'], 4)
        self.assertAlmostEqual(result['eta_int'], eta_nu)
        self.assertAlmostEqual(result['v_int'], 0.516397779494)
def test_update(self):
    """Updates to one Image orm-object reach a same-id object only
    after that object calls update()."""
    dataset1 = DataSet(data={'description': 'dataset with changing images'},
                       database=self.database)
    img_data = dict(tau_time=1000,
                    freq_eff=80e6,
                    url='/',
                    taustart_ts=datetime.datetime(2001, 1, 1),
                    freq_bw=1e6,
                    beam_smaj_pix=float(2.7),
                    beam_smin_pix=float(2.3),
                    beam_pa_rad=float(1.7),
                    deltax=float(-0.01111),
                    deltay=float(0.01111),
                    centre_ra=0,
                    centre_decl=0,
                    xtr_radius=3)
    image1 = Image(dataset=dataset1, data=img_data)
    self.assertAlmostEqual(image1.tau_time, 1000.)
    self.assertAlmostEqual(image1.freq_eff, 80e6)

    image1.update(tau_time=2000.)
    self.assertAlmostEqual(image1.tau_time, 2000.)

    # New image object, created from the database row of image1.
    image2 = Image(dataset=dataset1, id=image1.id)
    self.assertAlmostEqual(image2.tau_time, 2000.)
    self.assertAlmostEqual(image2.freq_eff, 80e6)

    # Same id, so changing image2 changes image1 —
    # but *only* after calling update().
    image2.update(tau_time=1500)
    self.assertAlmostEqual(image1.tau_time, 2000)
    image1.update()
    self.assertAlmostEqual(image1.tau_time, 1500)
    image1.update(tau_time=2500)
    image2.update()
    self.assertAlmostEqual(image2.tau_time, 2500)

    # Multiple keywords at once.
    image1.update(tau_time=1000., freq_eff=90e6)
    self.assertAlmostEqual(image1.tau_time, 1000)
    self.assertAlmostEqual(image1.freq_eff, 90e6)

    # Datetime round-trips behave the same way.
    self.assertEqual(image1.taustart_ts, datetime.datetime(2001, 1, 1))
    self.assertEqual(image2.taustart_ts, datetime.datetime(2001, 1, 1))
    image2.update(taustart_ts=datetime.datetime(2010, 3, 3))
    self.assertEqual(image1.taustart_ts, datetime.datetime(2001, 1, 1))
    self.assertEqual(image2.taustart_ts, datetime.datetime(2010, 3, 3))
    self.assertAlmostEqual(image2.tau_time, 1000)
    self.assertAlmostEqual(image2.freq_eff, 90e6)
    image1.update()
    self.assertEqual(image1.taustart_ts, datetime.datetime(2010, 3, 3))
def test_single_fixed_source(self):
    """test_single_fixed_source

    - Pretend to extract the same source in each of a series of images.
    - Perform source association
    - Check the image source listing works
    - Check runcat, assocxtrsource.
    """
    fixed_src_runcat_id = None
    for img_idx, img_pars in enumerate(self.im_params):
        self.db_imgs.append(Image(data=img_pars, dataset=self.dataset))
        current_img = self.db_imgs[-1]
        current_img.insert_extracted_sources(
            [db_subs.example_extractedsource_tuple()], 'blind')
        current_img.associate_extracted_sources(deRuiter_r,
                                                new_source_sigma_margin)

        # Exactly one running-catalog entry, accumulating one
        # datapoint per processed image.
        runcat_rows = columns_from_table(
            table="runningcatalog",
            keywords=['id', 'datapoints'],
            where={"dataset": self.dataset.id})
        self.assertEqual(len(runcat_rows), 1)
        self.assertEqual(runcat_rows[0]['datapoints'], img_idx + 1)

        # The runcat id must not change for a steady single source.
        if img_idx == 0:
            fixed_src_runcat_id = runcat_rows[0]['id']
            self.assertIsNotNone(fixed_src_runcat_id,
                                 "No runcat id assigned to source")
        self.assertEqual(runcat_rows[0]['id'], fixed_src_runcat_id,
                         "Multiple runcat ids for same fixed source")

        # Flux datapoints grow in step with the image count.
        flux_rows = columns_from_table(
            table="runningcatalog_flux",
            keywords=['f_datapoints'],
            where={"runcat": fixed_src_runcat_id})
        self.assertEqual(len(flux_rows), 1)
        self.assertEqual(img_idx + 1, flux_rows[0]['f_datapoints'])

        # Refresh and confirm the image lists exactly one source.
        current_img.update()
        current_img.update_sources()
        img_xtrsrc_ids = [src.id for src in current_img.sources]
        self.assertEqual(len(img_xtrsrc_ids), 1)

        # The association row for the most recent extraction must point
        # back at the fixed runcat entry.
        assoc_rows = columns_from_table(
            table="assocxtrsource",
            keywords=['runcat', 'xtrsrc'],
            where={"xtrsrc": img_xtrsrc_ids[0]})
        self.assertEqual(
            len(assoc_rows), 1,
            msg="No entries in assocxtrsrcs for image number "
                + str(img_idx))
        self.assertEqual(assoc_rows[0]['runcat'], fixed_src_runcat_id,
                         "Mismatched runcat id in assocxtrsrc table")
def test_lightcurve(self):
    """Insert three recurring sources into four time-spaced images and
    verify the extracted light curve, the flux summary statistics, and
    the per-timestep variability indices."""
    # Four images, one day apart.
    db_images = []
    img_data_list = db_subs.generate_timespaced_dbimages_data(
        n_images=4,
        taustart_ts=datetime.datetime(2010, 3, 3))
    for img_data in img_data_list:
        db_images.append(Image(dataset=self.dataset, data=img_data))

    # Three source templates with distinct coordinates & base flux.
    src_templates = [
        {'ra': 111.11 + i,
         'decl': 11.11 + i,
         'i_peak': 10. * i,
         'i_peak_err': 0.1}
        for i in range(1, 4)
    ]

    # Insert the 3 sources into every image, scaling flux per epoch.
    # Note that we reuse 'i_peak' as both peak & integrated flux.
    lc_by_ra = [[], [], []]
    for im_idx, image in enumerate(db_images):
        img_sources = []
        for src_idx, template in enumerate(src_templates):
            src = db_subs.example_extractedsource_tuple(
                ra=template['ra'], dec=template['decl'],
                peak=template['i_peak'] * (1 + im_idx),
                flux=template['i_peak'] * (1 + im_idx))
            lc_by_ra[src_idx].append(src)
            img_sources.append(src)
        insert_extracted_sources(image._id, img_sources)
        associate_extracted_sources(image._id, deRuiter_r=3.7,
                                    new_source_sigma_margin=3)

    # Refresh the dataset, its images, and their source sets.
    self.dataset.update()
    self.dataset.update_images()
    for image in self.dataset.images:
        image.update()
        image.update_sources()

    # From the last image, take the source with the smallest RA and
    # extract its light curve.
    final_sources = sorted(self.dataset.images[-1].sources,
                           key=attrgetter('ra'))
    lightcurve = ligtcurve_func(final_sources[0]._id)

    # The source must be associated across all four images, with the
    # expected timestamps and linearly-scaled fluxes.
    self.assertEqual(len(db_images), len(lightcurve))
    self.assertEqual(lightcurve[0][0], datetime.datetime(2010, 3, 3, 0, 0))
    self.assertEqual(lightcurve[1][0], datetime.datetime(2010, 3, 4, 0, 0))
    self.assertEqual(lightcurve[2][0], datetime.datetime(2010, 3, 5, 0, 0))
    self.assertEqual(lightcurve[3][0], datetime.datetime(2010, 3, 6, 0, 0))
    self.assertAlmostEqual(lightcurve[0][2], 10.)
    self.assertAlmostEqual(lightcurve[1][2], 20.)
    self.assertAlmostEqual(lightcurve[2][2], 30.)
    self.assertAlmostEqual(lightcurve[3][2], 40.)

    # Check the summary statistics (avg flux, etc).
    query = """\
SELECT rf.avg_f_int
      ,rf.avg_f_int_sq
      ,avg_weighted_f_int
      ,avg_f_int_weight
  FROM runningcatalog r
      ,runningcatalog_flux rf
 WHERE r.dataset = %(dataset)s
   AND r.id = rf.runcat
 ORDER BY r.wm_ra
"""
    self.database.cursor.execute(query, {'dataset': self.dataset.id})
    runcat_flux_entries = get_db_rows_as_dicts(self.database.cursor)
    self.assertEqual(len(runcat_flux_entries), len(lc_by_ra))
    for idx, flux_summary in enumerate(runcat_flux_entries):
        py_results = db_subs.lightcurve_metrics(lc_by_ra[idx])
        for key in flux_summary.keys():
            self.assertAlmostEqual(flux_summary[key],
                                   py_results[-1][key])

    # Now check the per-timestep statistics (variability indices).
    sorted_runcat_ids = columns_from_table(
        'runningcatalog',
        where={'dataset': self.dataset.id},
        order='wm_ra')
    sorted_runcat_ids = [entry['id'] for entry in sorted_runcat_ids]
    for idx, rcid in enumerate(sorted_runcat_ids):
        db_indices = db_queries.get_assoc_entries(self.database, rcid)
        py_indices = db_subs.lightcurve_metrics(lc_by_ra[idx])
        self.assertEqual(len(db_indices), len(py_indices))
        for nstep in range(len(db_indices)):
            for key in ('v_int', 'eta_int', 'f_datapoints'):
                self.assertAlmostEqual(db_indices[nstep][key],
                                       py_indices[nstep][key],
                                       places=5)
def test_lightsurface(self): images = [] # make 4 * 5 images with different frequencies and date for frequency in [80e6, 90e6, 100e6, 110e6, 120e6]: for day in [3, 4, 5, 6]: img_data = db_subs.example_dbimage_data_dict( taustart_ts=datetime.datetime(2010, 3, day), freq_eff = frequency ) image = Image(dataset=self.dataset, data=img_data) images.append(image) # 3 sources per image, with different coordinates & flux data_list = [] for i in range(1, 4): data_list.append({ 'ra': 111.111 + i, 'decl': 11.11 + i, 'ra_fit_err': 0.01, 'decl_fit_err': 0.01, 'i_peak': 10.*i, 'i_peak_err': 0.1, 'error_radius': 10.0, 'fit_type': 1, # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i }) # Insert the 3 sources in each image, while further varying the flux for i, image in enumerate(images): # Create the "source finding results" sources = [] for data in data_list: source = db_subs.example_extractedsource_tuple( ra=data['ra'], dec=data['decl'], ra_fit_err=data['ra_fit_err'], dec_fit_err= data['decl_fit_err'], peak = data['i_peak']*(1+i), peak_err = data['i_peak_err'], flux = data['i_peak']*(1+i), flux_err = data['i_peak_err'], fit_type=data['fit_type'] ) sources.append(source) # Insert the sources insert_extracted_sources(image._id, sources) # Run the association for each list of source for an image associate_extracted_sources(image._id, deRuiter_r=3.7, new_source_sigma_margin=3) # updates the dataset and its set of images self.dataset.update() self.dataset.update_images() # update the images and their sets of sources for image in self.dataset.images: image.update() image.update_sources() # Now pick any image, select the first source (smallest RA) # and extract its light curve # TODO: aaarch this is so ugly. Because this a set we need to pop it. sources = self.dataset.images.pop().sources #sources = self.dataset.images[-1].sources sources = sorted(sources, key=attrgetter('ra')) extracted_source = sources[0].id lightcurve = tkp.db.general.lightcurve(extracted_source)