def test_create(self):
    """
    Check that Image orm-objects can be created and attached to a DataSet.
    """
    data = {'freq_eff': 80e6,
            'freq_bw': 1e6,
            'taustart_ts': datetime.datetime(1999, 9, 9),
            'url': '/path/to/image',
            'tau_time': 0,
            # These are plain float literals; the redundant float() casts
            # from the original have been dropped.
            'beam_smaj_pix': 2.7,
            'beam_smin_pix': 2.3,
            'beam_pa_rad': 1.7,
            'deltax': -0.01111,
            'deltay': 0.01111,
            'centre_ra': 0,
            'centre_decl': 0,
            'xtr_radius': 3}
    dataset1 = DataSet(data={'description': 'dataset with images'},
                       database=self.database)
    #self.assertEqual(dataset1.images, set())
    image1 = Image(dataset=dataset1, data=data)
    # Images are automatically added to their dataset
    #self.assertEqual(dataset1.images, set([image1]))
    self.assertEqual(image1.tau_time, 0)
    self.assertAlmostEqual(image1.freq_eff, 80e6)
    image2 = Image(dataset=dataset1, data=data)
    #self.assertEqual(dataset1.images, set([image1, image2]))
    dataset2 = DataSet(database=self.database, id=dataset1.id)
    # Note that we can't test that dataset2.images == set([image1, image2]),
    # because dataset2.images are newly created Python objects,
    # with different ids
    #self.assertEqual(len(dataset2.images), 2)
    ## Now, update (changing nothing) and check the values survive:
    image1.update()
    self.assertEqual(image1.tau_time, 0)
def test_lightcurve(self):
    """
    Insert 3 sources into each of 4 time-spaced images, associate them,
    then verify the extracted lightcurve and the stored flux summary and
    per-timestep variability statistics.
    """
    # Make 4 images, one day apart.
    image_datasets = db_subs.generate_timespaced_dbimages_data(
        n_images=4, taustart_ts=datetime.datetime(2010, 3, 3))
    images = [Image(dataset=self.dataset, data=dset)
              for dset in image_datasets]

    # Three sources per image, with different coordinates & base flux.
    src_params = [{'ra': 111.11 + i,
                   'decl': 11.11 + i,
                   'i_peak': 10. * i,
                   'i_peak_err': 0.1,
                   }
                  for i in range(1, 4)]

    # Insert the 3 sources into every image, further scaling the flux
    # per image. Note that we reuse 'i_peak' as both peak & integrated
    # flux.
    lightcurves_sorted_by_ra = [[], [], []]
    for im_idx, image in enumerate(images):
        img_sources = []
        for src_idx, params in enumerate(src_params):
            src = db_subs.example_extractedsource_tuple(
                ra=params['ra'], dec=params['decl'],
                peak=params['i_peak'] * (1 + im_idx),
                flux=params['i_peak'] * (1 + im_idx))
            lightcurves_sorted_by_ra[src_idx].append(src)
            img_sources.append(src)
        insert_extracted_sources(image._id, img_sources)
        associate_extracted_sources(image._id, deRuiter_r=3.7,
                                    new_source_sigma_margin=3)

    # Refresh the dataset and its set of images...
    self.dataset.update()
    self.dataset.update_images()
    # ...then refresh each image and its set of sources.
    for image in self.dataset.images:
        image.update()
        image.update_sources()

    # Pick the last image, select its source with the smallest RA, and
    # extract that source's light curve.
    # NOTE(review): 'ligtcurve_func' looks like a typo'd import alias;
    # confirm it is actually defined at module level.
    sources = sorted(self.dataset.images[-1].sources,
                     key=attrgetter('ra'))
    lightcurve = ligtcurve_func(sources[0]._id)

    # The source must have been associated across all images.
    self.assertEqual(len(images), len(lightcurve))
    expectations = zip((3, 4, 5, 6), (10., 20., 30., 40.))
    for step, (day, flux) in enumerate(expectations):
        self.assertEqual(lightcurve[step][0],
                         datetime.datetime(2010, 3, day, 0, 0))
        self.assertAlmostEqual(lightcurve[step][2], flux)

    # Check the summary statistics (avg flux, etc.)
    query = """\
SELECT rf.avg_f_int
      ,rf.avg_f_int_sq
      ,avg_weighted_f_int
      ,avg_f_int_weight
FROM runningcatalog r
    ,runningcatalog_flux rf
WHERE r.dataset = %(dataset)s
  AND r.id = rf.runcat
ORDER BY r.wm_ra
"""
    self.database.cursor.execute(query, {'dataset': self.dataset.id})
    runcat_flux_entries = get_db_rows_as_dicts(self.database.cursor)
    self.assertEqual(len(runcat_flux_entries),
                     len(lightcurves_sorted_by_ra))
    for idx, flux_summary in enumerate(runcat_flux_entries):
        py_results = db_subs.lightcurve_metrics(
            lightcurves_sorted_by_ra[idx])
        for key in flux_summary.keys():
            self.assertAlmostEqual(flux_summary[key],
                                   py_results[-1][key])

    # Now check the per-timestep statistics (variability indices).
    sorted_runcat_ids = columns_from_table(
        'runningcatalog',
        where={'dataset': self.dataset.id},
        order='wm_ra')
    sorted_runcat_ids = [entry['id'] for entry in sorted_runcat_ids]

    for idx, rcid in enumerate(sorted_runcat_ids):
        db_indices = db_queries.get_assoc_entries(self.database, rcid)
        py_indices = db_subs.lightcurve_metrics(
            lightcurves_sorted_by_ra[idx])
        self.assertEqual(len(db_indices), len(py_indices))
        for nstep in range(len(db_indices)):
            for key in ('v_int', 'eta_int', 'f_datapoints'):
                self.assertAlmostEqual(db_indices[nstep][key],
                                       py_indices[nstep][key],
                                       places=5)
def test_lightcurve(self):
    """
    Insert 3 sources into each of 4 daily images, associate them, then
    verify the extracted lightcurve and the variability indices.
    """
    # Make 4 images with different dates, one per day.
    # (The original comment claimed "4 * 5" images, but only the 4-day
    # loop exists — fixed here.)
    images = []
    for day in [3, 4, 5, 6]:
        data = {'taustart_ts': datetime.datetime(2010, 3, day),
                'tau_time': 3600,
                'url': '/',
                'freq_eff': 80e6,
                'freq_bw': 1e6,
                # Plain float literals; redundant float() casts dropped.
                'beam_smaj_pix': 2.7,
                'beam_smin_pix': 2.3,
                'beam_pa_rad': 1.7,
                'deltax': -0.01111,
                'deltay': 0.01111,
                'centre_ra': 111,
                'centre_decl': 11,
                'xtr_radius': 3}
        image = Image(dataset=self.dataset, data=data)
        images.append(image)

    # 3 sources per image, with different coordinates & flux.
    data_list = []
    for i in range(1, 4):
        data_list.append({'ra': 111.11 + i,
                          'decl': 11.11 + i,
                          'ra_fit_err': 0.01,
                          'decl_fit_err': 0.01,
                          'ra_sys_err': 20,
                          'decl_sys_err': 20,
                          'i_peak': 10 * i,
                          'i_peak_err': 0.1
                          # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
                          })

    # Insert the 3 sources in each image, while further varying the flux.
    for i, image in enumerate(images):
        # Create the "source finding results".
        # Note that we reuse 'i_peak' as both peak & integrated flux.
        sources = []
        for data in data_list:
            source = (data['ra'], data['decl'],
                      data['ra_fit_err'], data['decl_fit_err'],  # Gaussian fit errors
                      data['i_peak'] * (1 + i), data['i_peak_err'],  # Peak
                      data['i_peak'] * (1 + i), data['i_peak_err'],  # Integrated
                      10.,  # Significance level
                      # Beam params (width arcsec major, width arcsec
                      # minor, parallactic angle)
                      1, 1, 0,
                      data['ra_sys_err'], data['decl_sys_err'])  # Systematic errors
            sources.append(source)
        # Insert the sources
        image.insert_extracted_sources(sources)
        # Run the association for each list of sources for an image
        image.associate_extracted_sources(deRuiter_r=3.7)

    # Update the dataset and its set of images...
    self.dataset.update()
    self.dataset.update_images()
    # ...and each image together with its set of sources.
    for image in self.dataset.images:
        image.update()
        image.update_sources()

    # Now pick any image, select the first source (smallest RA)
    # and extract its light curve.
    # NOTE(review): .pop() mutates self.dataset.images, so the
    # images[-1] lookup further below refers to a *different* image —
    # confirm this is intended.
    sources = self.dataset.images.pop().sources
    sources = sorted(sources, key=attrgetter('ra'))
    lightcurve = sources[0].lightcurve()

    # Check that the sources are associated in all images.
    self.assertEqual(len(images), len(lightcurve))
    self.assertEqual(lightcurve[0][0], datetime.datetime(2010, 3, 3, 0, 0))
    self.assertEqual(lightcurve[1][0], datetime.datetime(2010, 3, 4, 0, 0))
    self.assertEqual(lightcurve[2][0], datetime.datetime(2010, 3, 5, 0, 0))
    self.assertEqual(lightcurve[3][0], datetime.datetime(2010, 3, 6, 0, 0))
    self.assertAlmostEqual(lightcurve[0][2], 10.)
    self.assertAlmostEqual(lightcurve[1][2], 20.)
    self.assertAlmostEqual(lightcurve[2][2], 30.)
    self.assertAlmostEqual(lightcurve[3][2], 40.)

    # Since the light curves are very similar, only eta_nu is different.
    results = dbtransients._select_updated_variability_indices(
        self.dataset.images[-1].id)
    results = sorted(results, key=itemgetter('eta_int'))
    for result, eta_nu in zip(results,
                              (16666.66666667, 66666.666666667, 150000.0)):
        self.assertEqual(result['f_datapoints'], 4)
        self.assertAlmostEqual(result['eta_int'], eta_nu)
        self.assertAlmostEqual(result['v_int'], 0.516397779494)
def test_update(self):
    """
    Check that ORM-updates work for the Image class: writes through one
    orm-object become visible on another sharing the same id, but only
    after update() is called.
    """
    dataset1 = DataSet(data={'description': 'dataset with changing images'},
                       database=self.database)
    data = dict(tau_time=1000,
                freq_eff=80e6,
                url='/',
                taustart_ts=datetime.datetime(2001, 1, 1),
                freq_bw=1e6,
                # Plain float literals; redundant float() casts dropped.
                beam_smaj_pix=2.7,
                beam_smin_pix=2.3,
                beam_pa_rad=1.7,
                deltax=-0.01111,
                deltay=0.01111,
                centre_ra=0,
                centre_decl=0,
                xtr_radius=3)
    image1 = Image(dataset=dataset1, data=data)
    self.assertAlmostEqual(image1.tau_time, 1000.)
    self.assertAlmostEqual(image1.freq_eff, 80e6)
    image1.update(tau_time=2000.)
    self.assertAlmostEqual(image1.tau_time, 2000.)

    # New image orm-object, created from the database id.
    image2 = Image(dataset=dataset1, id=image1.id)
    self.assertAlmostEqual(image2.tau_time, 2000.)
    self.assertAlmostEqual(image2.freq_eff, 80e6)

    # Same id, so changing image2 changes image1,
    # but *only* after calling update().
    image2.update(tau_time=1500)
    self.assertAlmostEqual(image1.tau_time, 2000)
    image1.update()
    self.assertAlmostEqual(image1.tau_time, 1500)
    image1.update(tau_time=2500)
    image2.update()
    self.assertAlmostEqual(image2.tau_time, 2500)

    # Multiple fields in one update:
    image1.update(tau_time=1000., freq_eff=90e6)
    self.assertAlmostEqual(image1.tau_time, 1000)
    self.assertAlmostEqual(image1.freq_eff, 90e6)

    # Datetimes must round-trip through the database correctly:
    self.assertEqual(image1.taustart_ts, datetime.datetime(2001, 1, 1))
    self.assertEqual(image2.taustart_ts, datetime.datetime(2001, 1, 1))
    image2.update(taustart_ts=datetime.datetime(2010, 3, 3))
    # image1 not yet refreshed, still caches the old value:
    self.assertEqual(image1.taustart_ts, datetime.datetime(2001, 1, 1))
    self.assertEqual(image2.taustart_ts, datetime.datetime(2010, 3, 3))
    self.assertAlmostEqual(image2.tau_time, 1000)
    self.assertAlmostEqual(image2.freq_eff, 90e6)
    image1.update()
    self.assertEqual(image1.taustart_ts, datetime.datetime(2010, 3, 3))
def test_update(self): """ Check that ORM-updates work for the Image class. """ dataset1 = DataSet( data={'description': 'dataset with changing images'}, database=self.database) image_data = db_subs.example_dbimage_data_dict() image1 = Image(dataset=dataset1, data=image_data) # Now, update (without changing anything) and make sure it remains the # same (sanity check): image1.update() self.assertEqual(image1.tau_time, image_data['tau_time']) ##This time, change something: tau_time1 = image_data['tau_time'] + 100 tau_time2 = tau_time1 + 300 tau_time3 = tau_time2 + 300 tau_time4 = tau_time3 + 300 freq_eff1 = image_data['freq_eff'] * 1.2 image1.update(tau_time=tau_time1) self.assertEqual(image1.tau_time, tau_time1) # New 'image' orm-object, created from the database id of image1. image2 = Image(dataset=dataset1, id=image1.id) self.assertAlmostEqual(image2.tau_time, tau_time1) self.assertAlmostEqual(image2.freq_eff, image_data['freq_eff']) # Same id, so changing image2 changes image1 # but *only* after calling update() image2.update(tau_time=tau_time2) self.assertAlmostEqual(image1.tau_time, tau_time1) image1.update() self.assertAlmostEqual(image1.tau_time, tau_time2) image1.update(tau_time=tau_time3) image2.update() self.assertAlmostEqual(image2.tau_time, tau_time3) #Test with multiple items: image1.update(tau_time=tau_time4, freq_eff=freq_eff1) self.assertAlmostEqual(image1.tau_time, tau_time4) self.assertAlmostEqual(image1.freq_eff, freq_eff1) #And that datetime conversion roundtrips work correctly: dtime0 = image_data['taustart_ts'] dtime1 = dtime0 + datetime.timedelta(days=3) self.assertEqual(image1.taustart_ts, dtime0) self.assertEqual(image2.taustart_ts, dtime0) image2.update(taustart_ts=dtime1) #image1 orm-object not yet updated, still caches old value: self.assertEqual(image1.taustart_ts, dtime0) self.assertEqual(image2.taustart_ts, dtime1) image1.update() self.assertEqual(image1.taustart_ts, dtime1)
def test_update(self): """ Check that ORM-updates work for the Image class. """ dataset1 = DataSet(data={'description': 'dataset with changing images'}, database=self.database) image_data = db_subs.example_dbimage_data_dict() image1 = Image(dataset=dataset1, data=image_data) # Now, update (without changing anything) and make sure it remains the # same (sanity check): image1.update() self.assertEqual(image1.tau_time, image_data['tau_time']) ##This time, change something: tau_time1 = image_data['tau_time'] + 100 tau_time2 = tau_time1 + 300 tau_time3 = tau_time2 + 300 tau_time4 = tau_time3 + 300 freq_eff1 = image_data['freq_eff']*1.2 image1.update(tau_time = tau_time1) self.assertEqual(image1.tau_time, tau_time1) # New 'image' orm-object, created from the database id of image1. image2 = Image(dataset=dataset1, id=image1.id) self.assertAlmostEqual(image2.tau_time, tau_time1) self.assertAlmostEqual(image2.freq_eff, image_data['freq_eff']) # Same id, so changing image2 changes image1 # but *only* after calling update() image2.update(tau_time=tau_time2) self.assertAlmostEqual(image1.tau_time, tau_time1) image1.update() self.assertAlmostEqual(image1.tau_time, tau_time2) image1.update(tau_time=tau_time3) image2.update() self.assertAlmostEqual(image2.tau_time, tau_time3) #Test with multiple items: image1.update(tau_time=tau_time4, freq_eff=freq_eff1) self.assertAlmostEqual(image1.tau_time, tau_time4) self.assertAlmostEqual(image1.freq_eff, freq_eff1) #And that datetime conversion roundtrips work correctly: dtime0 = image_data['taustart_ts'] dtime1 = dtime0 + datetime.timedelta(days=3) self.assertEqual(image1.taustart_ts, dtime0) self.assertEqual(image2.taustart_ts, dtime0) image2.update(taustart_ts=dtime1) #image1 orm-object not yet updated, still caches old value: self.assertEqual(image1.taustart_ts, dtime0) self.assertEqual(image2.taustart_ts, dtime1) image1.update() self.assertEqual(image1.taustart_ts, dtime1)
def test_lightsurface(self):
    """
    Build a 5-frequency x 4-day grid of images, insert and associate the
    same 3 sources in each, then extract a lightcurve for the lowest-RA
    source.
    """
    # Make 4 * 5 images with different frequencies and dates.
    images = []
    for frequency in [80e6, 90e6, 100e6, 110e6, 120e6]:
        for day in [3, 4, 5, 6]:
            img_data = db_subs.example_dbimage_data_dict(
                taustart_ts=datetime.datetime(2010, 3, day),
                freq_eff=frequency)
            images.append(Image(dataset=self.dataset, data=img_data))

    # Three sources per image, with distinct coordinates and fluxes.
    data_list = [{'ra': 111.111 + i,
                  'decl': 11.11 + i,
                  'ra_fit_err': 0.01,
                  'decl_fit_err': 0.01,
                  'i_peak': 10. * i,
                  'i_peak_err': 0.1,
                  'error_radius': 10.0,
                  'fit_type': 1,
                  # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
                  }
                 for i in range(1, 4)]

    # Insert the 3 sources into each image, scaling the flux per image.
    for img_idx, image in enumerate(images):
        # Build the "source finding results".
        sources = []
        for entry in data_list:
            sources.append(db_subs.example_extractedsource_tuple(
                ra=entry['ra'], dec=entry['decl'],
                ra_fit_err=entry['ra_fit_err'],
                dec_fit_err=entry['decl_fit_err'],
                peak=entry['i_peak'] * (1 + img_idx),
                peak_err=entry['i_peak_err'],
                flux=entry['i_peak'] * (1 + img_idx),
                flux_err=entry['i_peak_err'],
                fit_type=entry['fit_type']))
        # Store the sources, then run the association for this image.
        insert_extracted_sources(image._id, sources)
        associate_extracted_sources(image._id, deRuiter_r=3.7,
                                    new_source_sigma_margin=3)

    # Refresh the dataset and its set of images...
    self.dataset.update()
    self.dataset.update_images()
    # ...and each image together with its set of sources.
    for image in self.dataset.images:
        image.update()
        image.update_sources()

    # Now pick any image, select the first source (smallest RA)
    # and extract its light curve.
    # TODO: aaarch this is so ugly. Because this a set we need to pop it.
    sources = self.dataset.images.pop().sources
    #sources = self.dataset.images[-1].sources
    sources = sorted(sources, key=attrgetter('ra'))
    extracted_source = sources[0].id
    lightcurve = tkp.db.general.lightcurve(extracted_source)
def test_lightsurface(self):
    """
    Build a 5-frequency x 4-day grid of images, insert and associate the
    same 3 sources (as raw tuples) in each, then extract a lightcurve for
    the lowest-RA source.
    """
    # Make 4 * 5 images with different frequencies and dates.
    images = []
    for frequency in [80e6, 90e6, 100e6, 110e6, 120e6]:
        for day in [3, 4, 5, 6]:
            data = {'taustart_ts': datetime.datetime(2010, 3, day),
                    'tau_time': 3600,
                    'url': '/',
                    'freq_eff': frequency,
                    'freq_bw': 1e6,
                    'beam_smaj_pix': float(2.7),
                    'beam_smin_pix': float(2.3),
                    'beam_pa_rad': float(1.7),
                    'deltax': float(-0.01111),
                    'deltay': float(0.01111),
                    'centre_ra': 111,
                    'centre_decl': 11,
                    'xtr_radius': 3}
            images.append(Image(dataset=self.dataset, data=data))

    # Three sources per image, with distinct coordinates and fluxes.
    data_list = [{'ra': 111.111 + i,
                  'decl': 11.11 + i,
                  'ra_fit_err': 0.01,
                  'decl_fit_err': 0.01,
                  'ew_sys_err': 20,
                  'ns_sys_err': 20,
                  'i_peak': 10 * i,
                  'i_peak_err': 0.1,
                  'error_radius': 10.0
                  # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
                  }
                 for i in range(1, 4)]

    # Insert the 3 sources into each image, scaling the flux per image.
    for i, image in enumerate(images):
        # Build the "source finding results" as raw tuples.
        sources = []
        for data in data_list:
            source = (data['ra'], data['decl'],
                      data['ra_fit_err'], data['decl_fit_err'],
                      data['i_peak'] * (1 + i), data['i_peak_err'],
                      data['i_peak'] * (1 + i), data['i_peak_err'],
                      10.,  # Significance level
                      # Beam params (width arcsec major, width arcsec
                      # minor, parallactic angle)
                      1, 1, 0,
                      data['ew_sys_err'], data['ns_sys_err'],  # Systematic errors
                      data['error_radius'])
            sources.append(source)
        # Store the sources, then run the association for this image.
        image.insert_extracted_sources(sources)
        image.associate_extracted_sources(deRuiter_r=3.7)

    # Refresh the dataset and its set of images...
    self.dataset.update()
    self.dataset.update_images()
    # ...and each image together with its set of sources.
    for image in self.dataset.images:
        image.update()
        image.update_sources()

    # Now pick any image, select the first source (smallest RA)
    # and extract its light curve.
    # TODO: aaarch this is so ugly. Because this a set we need to pop it.
    sources = self.dataset.images.pop().sources
    #sources = self.dataset.images[-1].sources
    sources = sorted(sources, key=attrgetter('ra'))
    extracted_source = sources[0].id
    lightcurve = tkp.db.general.lightcurve(extracted_source)
def test_lightsurface(self):
    """
    Build a 5-frequency x 4-day grid of images, insert and associate the
    same 3 sources in each, then extract a lightcurve for the lowest-RA
    source.
    """
    # Keyword-argument spacing normalized to PEP 8 (no spaces around '='),
    # matching the sibling test_lightsurface implementation.
    images = []
    # Make 4 * 5 images with different frequencies and dates.
    for frequency in [80e6, 90e6, 100e6, 110e6, 120e6]:
        for day in [3, 4, 5, 6]:
            img_data = db_subs.example_dbimage_data_dict(
                taustart_ts=datetime.datetime(2010, 3, day),
                freq_eff=frequency)
            image = Image(dataset=self.dataset, data=img_data)
            images.append(image)

    # 3 sources per image, with different coordinates & flux.
    data_list = []
    for i in range(1, 4):
        data_list.append({'ra': 111.111 + i,
                          'decl': 11.11 + i,
                          'ra_fit_err': 0.01,
                          'decl_fit_err': 0.01,
                          'i_peak': 10. * i,
                          'i_peak_err': 0.1,
                          'error_radius': 10.0,
                          'fit_type': 1,
                          # x=0.11, y=0.22, z=0.33, det_sigma=11.1, zone=i
                          })

    # Insert the 3 sources in each image, while further varying the flux.
    for i, image in enumerate(images):
        # Create the "source finding results".
        sources = []
        for data in data_list:
            source = db_subs.example_extractedsource_tuple(
                ra=data['ra'], dec=data['decl'],
                ra_fit_err=data['ra_fit_err'],
                dec_fit_err=data['decl_fit_err'],
                peak=data['i_peak'] * (1 + i),
                peak_err=data['i_peak_err'],
                flux=data['i_peak'] * (1 + i),
                flux_err=data['i_peak_err'],
                fit_type=data['fit_type'])
            sources.append(source)
        # Insert the sources
        insert_extracted_sources(image._id, sources)
        # Run the association for each list of sources for an image
        associate_extracted_sources(image._id, deRuiter_r=3.7,
                                    new_source_sigma_margin=3)

    # Update the dataset and its set of images...
    self.dataset.update()
    self.dataset.update_images()
    # ...and each image together with its set of sources.
    for image in self.dataset.images:
        image.update()
        image.update_sources()

    # Now pick any image, select the first source (smallest RA)
    # and extract its light curve.
    # TODO: aaarch this is so ugly. Because this a set we need to pop it.
    sources = self.dataset.images.pop().sources
    #sources = self.dataset.images[-1].sources
    sources = sorted(sources, key=attrgetter('ra'))
    extracted_source = sources[0].id
    lightcurve = tkp.db.general.lightcurve(extracted_source)