def testAgainstPersisted(self):
    """Check that the persisted HTM shard matches the on-disk comparison
    catalog, column by column.

    If ``REGENERATE_COMPARISON`` is set, overwrite the comparison file
    with freshly persisted data instead of comparing, then fail so the
    flag is not left enabled by accident.
    """
    htmShard = 2222
    refDatasetName = IngestIndexedReferenceTask.ConfigClass().dataset_config.ref_dataset_name
    shardDataId = self.indexer.makeDataId(htmShard, refDatasetName)
    self.assertTrue(self.testButler.datasetExists('ref_cat', shardDataId))
    persistedCat = self.testButler.get('ref_cat', shardDataId)
    if REGENERATE_COMPARISON:
        # Replace the stored comparison file with freshly persisted data.
        if os.path.exists(self.testCatPath):
            os.unlink(self.testCatPath)
        persistedCat.writeFits(self.testCatPath)
        self.fail("New comparison data written; unset REGENERATE_COMPARISON in order to proceed")
    comparisonCat = afwTable.SimpleCatalog.readFits(self.testCatPath)
    persistedColumns = persistedCat.extract('*')
    comparisonColumns = comparisonCat.extract('*')
    # Same set of columns, then near-equality of every column's values.
    self.assertEqual(set(persistedColumns.keys()), set(comparisonColumns.keys()))
    for columnName in persistedColumns:
        np.testing.assert_array_almost_equal(persistedColumns[columnName],
                                             comparisonColumns[columnName])
def testAgainstPersisted(self):
    """Compare a persisted shard of the reference catalog against the
    pre-loaded comparison catalog.

    Notes
    -----
    ``assertDictEqual`` cannot be used here: the values returned by
    ``extract('*')`` are numpy arrays, and comparing dicts of arrays
    with ``==`` raises "the truth value of an array with more than one
    element is ambiguous".  Compare the key sets and then each column
    explicitly instead.
    """
    pix_id = 671901
    dataset_name = IngestIndexedReferenceTask.ConfigClass().dataset_config.ref_dataset_name
    data_id = self.indexer.make_data_id(pix_id, dataset_name)
    self.assertTrue(self.test_butler.datasetExists('ref_cat', data_id))
    ref_cat = self.test_butler.get('ref_cat', data_id)
    ex1 = ref_cat.extract('*')
    ex2 = self.test_cat.extract('*')
    # Key order may differ between the two catalogs, so compare the key
    # sets first, then the contents of each column.
    self.assertEqual(set(ex1.keys()), set(ex2.keys()))
    for kk in ex1:
        np.testing.assert_array_equal(ex1[kk], ex2[kk])
def setUpClass(cls):
    """Build the shared test fixtures once for the whole test case: a
    synthetic sky catalog on disk, an HTM indexer matching the on-disk
    comparison data, brute-forced per-position lists of expected source
    IDs, and an ingested butler repository to load reference catalogs
    from.
    """
    cls.out_path = tempfile.mkdtemp()  # scratch dir; presumably removed in tearDownClass -- TODO confirm
    cls.test_cat_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), "data", "testHtmIndex.fits")
    # Pre-generated comparison catalog shipped with the test data.
    cls.test_cat = afwTable.SourceCatalog.readFits(cls.test_cat_path)
    ret = cls.make_sky_catalog(cls.out_path)
    cls.sky_catalog_file, cls.sky_catalog_file_delim, cls.sky_catalog = ret
    # Grid of search centers: every (ra, dec) pair in the cross product
    # of these two lists is tested below.
    cls.test_ras = [210., 14.5, 93., 180., 286., 0.]
    cls.test_decs = [-90., -51., -30.1, 0., 27.3, 62., 90.]
    cls.search_radius = 3. * afwGeom.degrees
    cls.comp_cats = {}  # dict of center coord: list of IDs of stars within cls.search_radius of center
    cls.depth = 4  # gives a mean area of 20 deg^2 per pixel, roughly matching a 3 deg search radius
    config = IndexerRegistry['HTM'].ConfigClass()
    # Match on disk comparison file
    config.depth = cls.depth
    cls.indexer = IndexerRegistry['HTM'](config)
    # Brute-force the expected answer: for each center, collect the IDs
    # of all catalog entries within the search radius.
    for ra in cls.test_ras:
        for dec in cls.test_decs:
            tupl = (ra, dec)
            cent = make_coord(*tupl)
            cls.comp_cats[tupl] = []
            for rec in cls.sky_catalog:
                if make_coord(rec['ra_icrs'], rec['dec_icrs']).separation(cent) < cls.search_radius:
                    cls.comp_cats[tupl].append(rec['id'])
    cls.test_repo_path = cls.out_path + "/test_repo"
    config = IngestIndexedReferenceTask.ConfigClass()
    # To match on disk test data
    config.dataset_config.indexer.active.depth = cls.depth
    config.ra_name = 'ra_icrs'
    config.dec_name = 'dec_icrs'
    config.mag_column_list = ['a', 'b']
    config.id_name = 'id'
    config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'}
    # Ingest the generated catalog into a fresh butler repository.
    IngestIndexedReferenceTask.parseAndRun(
        args=[input_dir, "--output", cls.test_repo_path, cls.sky_catalog_file],
        config=config)
    cls.default_dataset_name = config.dataset_config.ref_dataset_name
    cls.test_dataset_name = 'diff_ref_name'
    cls.test_butler = dafPersist.Butler(cls.test_repo_path)
    # Symlink the ingested dataset under a second name so that loading
    # with a non-default ref_dataset_name can be tested without
    # re-ingesting the catalog.
    os.symlink(
        os.path.join(cls.test_repo_path, 'ref_cats', cls.default_dataset_name),
        os.path.join(cls.test_repo_path, 'ref_cats', cls.test_dataset_name))
def makeConfig(withMagErr=False, withRaDecErr=False, withPm=False, withPmErr=False,
               withParallax=False, withParallaxErr=False):
    """Make a config for IngestIndexedReferenceTask

    This is primarily intended to simplify tests of config validation,
    so fields that are not validated are not set.
    However, it can also be used to reduce boilerplate in other tests.

    Parameters
    ----------
    withMagErr : `bool`
        Set the magnitude error column map?
    withRaDecErr : `bool`
        Set the ra/dec error column names?
    withPm : `bool`
        Set the proper-motion column names (also sets the epoch fields)?
    withPmErr : `bool`
        Set the proper-motion error column names?
    withParallax : `bool`
        Set the parallax column name (also sets the epoch fields)?
    withParallaxErr : `bool`
        Set the parallax error column name?

    Returns
    -------
    config : `IngestIndexedReferenceTask.ConfigClass`
        The resulting config, with the requested fields populated.
    """
    config = IngestIndexedReferenceTask.ConfigClass()
    config.pm_scale = 1000.0
    config.ra_name = 'ra_icrs'
    config.dec_name = 'dec_icrs'
    config.mag_column_list = ['a', 'b']
    if withMagErr:
        config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'}
    if withRaDecErr:
        config.ra_err_name = "ra_err"
        config.dec_err_name = "dec_err"
    if withPm:
        config.pm_ra_name = "pm_ra"
        config.pm_dec_name = "pm_dec"
    if withPmErr:
        config.pm_ra_err_name = "pm_ra_err"
        config.pm_dec_err_name = "pm_dec_err"
    if withParallax:
        config.parallax_name = "parallax"
    if withParallaxErr:
        config.parallax_err_name = "parallax_err"
    if withPm or withParallax:
        # Proper motion and parallax both need an epoch to be applied at.
        config.epoch_name = "unixtime"
        config.epoch_format = "unix"
        config.epoch_scale = "utc"
    return config
def testAgainstPersisted(self):
    """Compare one persisted HTM shard against the stored comparison file.

    If ``REGENERATE_COMPARISON`` is set, overwrite the on-disk comparison
    catalog with freshly persisted data instead of comparing, then fail
    so the flag is not left enabled by accident.
    """
    pix_id = 2222
    dataset_name = IngestIndexedReferenceTask.ConfigClass().dataset_config.ref_dataset_name
    data_id = self.indexer.make_data_id(pix_id, dataset_name)
    self.assertTrue(self.test_butler.datasetExists('ref_cat', data_id))
    ref_cat = self.test_butler.get('ref_cat', data_id)
    if REGENERATE_COMPARISON:
        # Guard the unlink: the comparison file may not exist yet (e.g.
        # when generating it for the first time), and an unconditional
        # os.unlink would raise FileNotFoundError before the new file
        # could be written.
        if os.path.exists(self.test_cat_path):
            os.unlink(self.test_cat_path)
        ref_cat.writeFits(self.test_cat_path)
        self.fail("New comparison data written; unset REGENERATE_COMPARISON in order to proceed")
    ex1 = ref_cat.extract('*')
    ex2 = self.test_cat.extract('*')
    # Same column set, then exact equality of every column's values.
    self.assertEqual(set(ex1.keys()), set(ex2.keys()))
    for kk in ex1:
        np.testing.assert_array_equal(ex1[kk], ex2[kk])
def testIngest(self):
    """Test IngestIndexedReferenceTask.

    Drives the ingestion task end-to-end through several config
    variants: two configs expected to fail validation, a multi-file
    ingest with a minimal valid config, and an ingest with overridden
    dataset name / indexing depth / delimiter that is then read back
    via LoadIndexedReferenceObjectsTask.
    """
    default_config = IngestIndexedReferenceTask.ConfigClass()
    # test ingest with default config
    # This should raise since I haven't specified the ra/dec/mag columns.
    with self.assertRaises(ValueError):
        IngestIndexedReferenceTask.parseAndRun(
            args=[input_dir, "--output", self.out_path + "/output", self.sky_catalog_file],
            config=default_config)
    # test with ~minimum config. Mag errors are not technically necessary, but might as well test here
    default_config.ra_name = 'ra_icrs'
    default_config.dec_name = 'dec_icrs'
    default_config.mag_column_list = ['a', 'b']
    default_config.mag_err_column_map = {'a': 'a_err'}
    # should raise since all columns need an error column if any do
    with self.assertRaises(ValueError):
        IngestIndexedReferenceTask.parseAndRun(
            args=[input_dir, "--output", self.out_path + "/output", self.sky_catalog_file],
            config=default_config)
    # test with multiple files and correct config
    default_config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'}
    IngestIndexedReferenceTask.parseAndRun(
        args=[input_dir, "--output", self.out_path + "/output_multifile",
              self.sky_catalog_file, self.sky_catalog_file],
        config=default_config)
    # test with config overrides
    default_config = IngestIndexedReferenceTask.ConfigClass()
    default_config.ra_name = 'ra'
    default_config.dec_name = 'dec'
    default_config.mag_column_list = ['a', 'b']
    default_config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'}
    # Non-default dataset name; read back below via a matching loader config.
    default_config.dataset_config.ref_dataset_name = 'myrefcat'
    default_config.dataset_config.indexer.active.depth = 10
    default_config.is_photometric_name = 'is_phot'
    default_config.is_resolved_name = 'is_res'
    default_config.is_variable_name = 'is_var'
    default_config.id_name = 'id'
    default_config.extra_col_names = ['val1', 'val2', 'val3']
    default_config.file_reader.header_lines = 1
    default_config.file_reader.colnames = [
        'id', 'ra', 'dec', 'a', 'a_err', 'b', 'b_err', 'is_phot',
        'is_res', 'is_var', 'val1', 'val2', 'val3'
    ]
    default_config.file_reader.delimiter = '|'
    # this also tests changing the delimiter
    IngestIndexedReferenceTask.parseAndRun(
        args=[input_dir, "--output", self.out_path + "/output_override",
              self.sky_catalog_file_delim],
        config=default_config)
    # This location is known to have objects
    cent = make_coord(93.0, -90.0)
    # Test if we can get back the catalog with a non-standard dataset name
    butler = dafPersist.Butler(self.out_path + "/output_override")
    config = LoadIndexedReferenceObjectsConfig()
    config.ref_dataset_name = "myrefcat"
    loader = LoadIndexedReferenceObjectsTask(butler=butler, config=config)
    cat = loader.loadSkyCircle(cent, self.search_radius, filterName='a')
    self.assertTrue(len(cat) > 0)
    # test that a catalog can be loaded even with a name not used for ingestion
    # (relies on the symlinked dataset name created in setUpClass)
    butler = dafPersist.Butler(self.test_repo_path)
    config = LoadIndexedReferenceObjectsConfig()
    config.ref_dataset_name = self.test_dataset_name
    loader = LoadIndexedReferenceObjectsTask(butler=butler, config=config)
    cat = loader.loadSkyCircle(cent, self.search_radius, filterName='a')
    self.assertTrue(len(cat) > 0)
"%i", "%.15g", "%.15g", "%.15g", "%.15g", "%.15g", "%.15g", "%.15g", "%.15g", "%i", "%i", "%i", "%.15g", "%.15g", "%.15g", "%.15g", "%.15g" ]) np.savetxt(outPath + "/ref.txt", arr, delimiter=",", **saveKwargs) return outPath + "/ref.txt" inPath = os.path.join(lsst.utils.getPackageDir('meas_algorithms'), 'tests/data') outPath = os.path.join(inPath, "version%s" % LATEST_FORMAT_VERSION) skyCatalogFile = make_skyCatalog(inPath) config = IngestIndexedReferenceTask.ConfigClass() config.ra_name = 'ra_icrs' config.dec_name = 'dec_icrs' config.mag_column_list = ['a', 'b'] config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'} config.dataset_config.indexer.active.depth = 4 OBS_TEST_DIR = lsst.utils.getPackageDir('obs_test') obsTestPath = os.path.join(OBS_TEST_DIR, "data", "input") IngestIndexedReferenceTask.parseAndRun( args=[obsTestPath, "--output", outPath, skyCatalogFile], config=config) # cleanup files and make the mapper independent of obs_test's path os.remove(skyCatalogFile) os.remove(os.path.join(outPath, 'repositoryCfg.yaml')) with open(os.path.join(outPath, '_mapper'), 'w') as mapperFile: