def test_update(self):
    """Update all or individual dataset columns"""
    dataset1 = DataSet(data={'description': 'dataset 1'})
    self.assertEqual(dataset1.description, "dataset 1")

    dataset1.update(rerun=5, description="new dataset")
    self.database.cursor.execute(
        "SELECT rerun, description FROM dataset WHERE id=%s",
        (dataset1.id,))
    results = self.database.cursor.fetchone()
    self.assertEqual(results[0], 5)
    self.assertEqual(results[1], "new dataset")
    self.assertEqual(dataset1.description, "new dataset")
    self.assertEqual(dataset1.rerun, 5)

    dataset1.update(process_end_ts=datetime.datetime(1970, 1, 1))
    self.assertEqual(dataset1.process_end_ts,
                     datetime.datetime(1970, 1, 1))
def test_create(self):
    """Create a new dataset, and retrieve it"""
    dataset1 = DataSet(data={'description': 'dataset 1'})

    # The description of the following dataset is ignored: it is
    # copied from the existing dataset with the given id.
    dataset2 = DataSet(id=dataset1.id)
    dataset2.update()
    self.assertEqual(dataset2.description, "dataset 1")
    self.assertEqual(dataset2.id, dataset1.id)

    # Update some columns; id and description are unaffected.
    dataset2.update(type=2, process_ts=datetime.datetime(1970, 1, 1))
    self.assertEqual(dataset2.description, "dataset 1")
    self.assertEqual(dataset2.id, dataset1.id)

    # 'data' is ignored if an id is given:
    dataset3 = DataSet(data={'description': 'dataset 3'}, id=dataset1.id)
    self.assertEqual(dataset3.description, "dataset 1")
    self.assertEqual(dataset3.id, dataset1.id)
class TestLightCurve(unittest.TestCase):
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = DataSet(data={'description': 'dataset with images'},
                               database=self.database)

    def tearDown(self):
        tkp.db.rollback()

    @requires_database()
    def test_lightcurve(self):
        # make 4 images with different dates
        images = []
        image_datasets = db_subs.generate_timespaced_dbimages_data(
            n_images=4,
            taustart_ts=datetime.datetime(2010, 3, 3))
        for dset in image_datasets:
            image = Image(dataset=self.dataset, data=dset)
            images.append(image)

        # 3 sources per image, with different coordinates & fluxes
        data_list = []
        for i in range(1, 4):
            data_list.append({
                'ra': 111.11 + i,
                'decl': 11.11 + i,
                'i_peak': 10. * i,
                'i_peak_err': 0.1,
            })

        # Insert the 3 sources into each image, while further varying the flux
        lightcurves_sorted_by_ra = [[], [], []]
        for im_idx, image in enumerate(images):
            # Create the "source finding results".
            # Note that we reuse 'i_peak' as both peak & integrated flux.
            img_sources = []
            for src_idx, data in enumerate(data_list):
                src = db_subs.example_extractedsource_tuple(
                    ra=data['ra'], dec=data['decl'],
                    peak=data['i_peak'] * (1 + im_idx),
                    flux=data['i_peak'] * (1 + im_idx))
                lightcurves_sorted_by_ra[src_idx].append(src)
                img_sources.append(src)
            insert_extracted_sources(image._id, img_sources)
            associate_extracted_sources(image._id, deRuiter_r=3.7,
                                        new_source_sigma_margin=3)

        # update the dataset and its set of images
        self.dataset.update()
        self.dataset.update_images()

        # update the images and their sets of sources
        for image in self.dataset.images:
            image.update()
            image.update_sources()

        # Now pick the last image, select the first source (smallest RA)
        # and extract its light curve
        sources = self.dataset.images[-1].sources
        sources = sorted(sources, key=attrgetter('ra'))
        lightcurve = lightcurve_func(sources[0]._id)

        # check that the source is associated in all images
        self.assertEqual(len(images), len(lightcurve))
        self.assertEqual(lightcurve[0][0], datetime.datetime(2010, 3, 3, 0, 0))
        self.assertEqual(lightcurve[1][0], datetime.datetime(2010, 3, 4, 0, 0))
        self.assertEqual(lightcurve[2][0], datetime.datetime(2010, 3, 5, 0, 0))
        self.assertEqual(lightcurve[3][0], datetime.datetime(2010, 3, 6, 0, 0))
        self.assertAlmostEqual(lightcurve[0][2], 10.)
        self.assertAlmostEqual(lightcurve[1][2], 20.)
        self.assertAlmostEqual(lightcurve[2][2], 30.)
        self.assertAlmostEqual(lightcurve[3][2], 40.)
        # Check the summary statistics (avg flux, etc.)
        query = """\
        SELECT rf.avg_f_int
              ,rf.avg_f_int_sq
              ,avg_weighted_f_int
              ,avg_f_int_weight
        FROM runningcatalog r
            ,runningcatalog_flux rf
        WHERE r.dataset = %(dataset)s
          AND r.id = rf.runcat
        ORDER BY r.wm_ra
        """
        self.database.cursor.execute(query, {'dataset': self.dataset.id})
        runcat_flux_entries = get_db_rows_as_dicts(self.database.cursor)
        self.assertEqual(len(runcat_flux_entries),
                         len(lightcurves_sorted_by_ra))

        for idx, flux_summary in enumerate(runcat_flux_entries):
            py_results = db_subs.lightcurve_metrics(
                lightcurves_sorted_by_ra[idx])
            for key in flux_summary.keys():
                self.assertAlmostEqual(flux_summary[key],
                                       py_results[-1][key])

        # Now check the per-timestep statistics (variability indices)
        sorted_runcat_ids = columns_from_table(
            'runningcatalog',
            where={'dataset': self.dataset.id},
            order='wm_ra')
        sorted_runcat_ids = [entry['id'] for entry in sorted_runcat_ids]

        for idx, rcid in enumerate(sorted_runcat_ids):
            db_indices = db_queries.get_assoc_entries(self.database, rcid)
            py_indices = db_subs.lightcurve_metrics(
                lightcurves_sorted_by_ra[idx])
            self.assertEqual(len(db_indices), len(py_indices))
            for nstep in range(len(db_indices)):
                for key in ('v_int', 'eta_int', 'f_datapoints'):
                    self.assertAlmostEqual(db_indices[nstep][key],
                                           py_indices[nstep][key],
                                           places=5)
class TestLightSurface(unittest.TestCase):
    def setUp(self):
        self.database = tkp.db.Database()
        self.dataset = DataSet(data={'description': 'dataset with images'})

    def tearDown(self):
        tkp.db.rollback()

    @requires_database()
    def test_lightsurface(self):
        # make 4 * 5 images with different frequencies and dates
        images = []
        for frequency in [80e6, 90e6, 100e6, 110e6, 120e6]:
            for day in [3, 4, 5, 6]:
                img_data = db_subs.example_dbimage_data_dict(
                    taustart_ts=datetime.datetime(2010, 3, day),
                    freq_eff=frequency)
                image = Image(dataset=self.dataset, data=img_data)
                images.append(image)

        # 3 sources per image, with different coordinates & fluxes
        data_list = []
        for i in range(1, 4):
            data_list.append({
                'ra': 111.111 + i,
                'decl': 11.11 + i,
                'ra_fit_err': 0.01,
                'decl_fit_err': 0.01,
                'i_peak': 10. * i,
                'i_peak_err': 0.1,
                'error_radius': 10.0,
                'fit_type': 1,
            })

        # Insert the 3 sources into each image, while further varying the flux
        for i, image in enumerate(images):
            # Create the "source finding results"
            sources = []
            for data in data_list:
                source = db_subs.example_extractedsource_tuple(
                    ra=data['ra'], dec=data['decl'],
                    ra_fit_err=data['ra_fit_err'],
                    dec_fit_err=data['decl_fit_err'],
                    peak=data['i_peak'] * (1 + i),
                    peak_err=data['i_peak_err'],
                    flux=data['i_peak'] * (1 + i),
                    flux_err=data['i_peak_err'],
                    fit_type=data['fit_type'])
                sources.append(source)

            # Insert the sources
            insert_extracted_sources(image._id, sources)
            # Run the association for this image's list of sources
            associate_extracted_sources(image._id, deRuiter_r=3.7,
                                        new_source_sigma_margin=3)

        # update the dataset and its set of images
        self.dataset.update()
        self.dataset.update_images()

        # update the images and their sets of sources
        for image in self.dataset.images:
            image.update()
            image.update_sources()

        # Now pick any image, select the first source (smallest RA)
        # and extract its light curve.
        # TODO: since self.dataset.images is a set, we have to pop an
        # arbitrary element rather than index it.
        sources = self.dataset.images.pop().sources
        sources = sorted(sources, key=attrgetter('ra'))
        extracted_source = sources[0].id
        lightcurve = tkp.db.general.lightcurve(extracted_source)