        def runTest(withRaDecErr):
            # Generate two input catalogs with non-overlapping ids
            inPath1 = tempfile.mkdtemp()
            skyCatalogFile1, _, skyCatalog1 = self.makeSkyCatalog(inPath1, idStart=25, seed=123)
            inPath2 = tempfile.mkdtemp()
            skyCatalogFile2, _, skyCatalog2 = self.makeSkyCatalog(inPath2, idStart=5432, seed=11)
            # override some field names, and use multiple cores
            config = ingestIndexTestBase.makeIngestIndexConfig(withRaDecErr=withRaDecErr, withMagErr=True,
                                                               withPm=True, withPmErr=True)
            # use a very small HTM pixelization depth to ensure there will be collisions when
            # ingesting the files in parallel
            config.dataset_config.indexer.active.depth = 2
            # np.savetxt prepends '# ' to the header lines, so use a reader that understands that
            config.file_reader.format = 'ascii.commented_header'
            config.n_processes = 2  # use multiple cores for this test only
            config.id_name = 'id'  # Use the ids from the generated catalogs
            outpath = os.path.join(self.outPath, "output_multifile_parallel",
                                   "_withRaDecErr" if withRaDecErr else "")
            IngestIndexedReferenceTask.parseAndRun(
                args=[self.input_dir, "--output", outpath,
                      skyCatalogFile1, skyCatalogFile2], config=config)

            # Check that every row of both input catalogs is in the ingested refcat
            butler = dafPersist.Butler(outpath)
            loaderConfig = LoadIndexedReferenceObjectsConfig()
            loader = LoadIndexedReferenceObjectsTask(butler=butler, config=loaderConfig)
            self.checkAllRowsInRefcat(loader, skyCatalog1, config)
            self.checkAllRowsInRefcat(loader, skyCatalog2, config)
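
# Why depth 2 guarantees shard collisions (sketch, not from the original test):
# HTM depth d tiles the sky into 8 * 4**d trixels, so depth 2 gives only 128
# shards for the whole sky, and two parallel ingest workers are almost certain
# to write to the same shard file. The count can be checked with lsst.sphgeom:
import lsst.sphgeom

htm = lsst.sphgeom.HtmPixelization(2)
begin, end = htm.universe()[0]  # universe() spans the full range of pixel ids
print(end - begin)  # 128 trixels at depth 2
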
    def setUp(self):
        np.random.seed(10)

        self.log = lsst.log.Log.getLogger("TestIngestIndexManager")
        self.config = ingestIndexTestBase.makeIngestIndexConfig(withRaDecErr=True)
        self.config.id_name = 'id'
        depth = 2  # very small depth, for as few pixels as possible.
        self.indexer = HtmIndexer(depth)
        self.htm = lsst.sphgeom.HtmPixelization(depth)
        ingester = IngestIndexedReferenceTask(self.config)
        dtype = [('id', '<f8'), ('ra', '<f8'), ('dec', '<f8'), ('ra_err', '<f8'), ('dec_err', '<f8'),
                 ('a', '<f8'), ('a_err', '<f8')]
        self.schema, self.key_map = ingester.makeSchema(dtype)
        self.fileReader = ReadTextCatalogTask()

        self.fakeInput = self.makeSkyCatalog(outPath=None, size=5, idStart=6543)
        self.matchedPixels = np.array([1, 1, 2, 2, 3])
        self.path = tempfile.mkdtemp()
        self.filenames = {x: os.path.join(self.path, "%d.fits" % x) for x in set(self.matchedPixels)}

        self.worker = IngestIndexManager(self.filenames,
                                         self.config,
                                         self.fileReader,
                                         self.indexer,
                                         self.schema,
                                         self.key_map,
                                         self.htm.universe()[0],
                                         addRefCatMetadata,
                                         self.log)
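        # The fixture above maps the 5 fake rows onto HTM shards via
        # matchedPixels: rows 0-1 go to 1.fits, rows 2-3 to 2.fits, and
        # row 4 to 3.fits, exercising both multi-row and single-row shards.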
Example 3
    def testIngest(self):
        """Test IngestIndexedReferenceTask."""
        # Test with multiple files and standard config
        config = self.makeConfig(withRaDecErr=True, withMagErr=True, withPm=True, withPmErr=True)
        IngestIndexedReferenceTask.parseAndRun(
            args=[INPUT_DIR, "--output", self.outPath+"/output_multifile",
                  self.skyCatalogFile, self.skyCatalogFile],
            config=config)
        # A newly-ingested refcat should be marked format_version=1.
        loader = LoadIndexedReferenceObjectsTask(butler=dafPersist.Butler(self.outPath+"/output_multifile"))
        self.assertEqual(loader.dataset_config.format_version, 1)

        # Test with config overrides
        config2 = self.makeConfig(withRaDecErr=True, withMagErr=True, withPm=True, withPmErr=True)
        config2.ra_name = "ra"
        config2.dec_name = "dec"
        config2.dataset_config.ref_dataset_name = 'myrefcat'
        # Change the indexing depth to prove we can.
        # Smaller is better than larger because it makes fewer files.
        config2.dataset_config.indexer.active.depth = self.depth - 1
        config2.is_photometric_name = 'is_phot'
        config2.is_resolved_name = 'is_res'
        config2.is_variable_name = 'is_var'
        config2.id_name = 'id'
        config2.extra_col_names = ['val1', 'val2', 'val3']
        config2.file_reader.header_lines = 1
        config2.file_reader.colnames = [
            'id', 'ra', 'dec', 'ra_err', 'dec_err', 'a', 'a_err', 'b', 'b_err', 'is_phot',
            'is_res', 'is_var', 'val1', 'val2', 'val3', 'pm_ra', 'pm_dec', 'pm_ra_err',
            'pm_dec_err', 'unixtime',
        ]
        config2.file_reader.delimiter = '|'
        # this also tests changing the delimiter
        IngestIndexedReferenceTask.parseAndRun(
            args=[INPUT_DIR, "--output", self.outPath+"/output_override",
                  self.skyCatalogFileDelim], config=config2)

        # This location is known to have objects
        cent = make_coord(93.0, -90.0)

        # Test if we can get back the catalog with a non-standard dataset name
        butler = dafPersist.Butler(self.outPath+"/output_override")
        loaderConfig = LoadIndexedReferenceObjectsConfig()
        loaderConfig.ref_dataset_name = "myrefcat"
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=loaderConfig)
        cat = loader.loadSkyCircle(cent, self.searchRadius, filterName='a').refCat
        self.assertTrue(len(cat) > 0)
        self.assertTrue(cat.isContiguous())

        # test that a catalog can be loaded even with a name not used for ingestion
        butler = dafPersist.Butler(self.testRepoPath)
        loaderConfig2 = LoadIndexedReferenceObjectsConfig()
        loaderConfig2.ref_dataset_name = self.testDatasetName
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=loaderConfig2)
        cat = loader.loadSkyCircle(cent, self.searchRadius, filterName='a').refCat
        self.assertTrue(len(cat) > 0)
        self.assertTrue(cat.isContiguous())
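        # Both loads assert contiguity; if a loaded SimpleCatalog ever came
        # back non-contiguous, a deep copy would compact it (afw.table idiom):
        #     cat = cat.copy(deep=True)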
Example 4
    def setUpClass(cls):
        cls.outPath = tempfile.mkdtemp()
        cls.testCatPath = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "data",
            "testHtmIndex.fits")
        # arbitrary, but reasonable, amount of proper motion (angle/year)
        # and direction of proper motion
        cls.properMotionAmt = 3.0 * lsst.geom.arcseconds
        cls.properMotionDir = 45 * lsst.geom.degrees
        cls.properMotionErr = 1e-3 * lsst.geom.arcseconds
        cls.epoch = astropy.time.Time(58206.861330339219,
                                      scale="tai",
                                      format="mjd")
        ret = cls.make_skyCatalog(cls.outPath)
        cls.skyCatalogFile, cls.skyCatalogFileDelim, cls.skyCatalog = ret
        cls.testRas = [210., 14.5, 93., 180., 286., 0.]
        cls.testDecs = [-90., -51., -30.1, 0., 27.3, 62., 90.]
        cls.searchRadius = 3. * lsst.geom.degrees
        cls.compCats = {}  # dict of center coord: list of IDs of stars within cls.searchRadius of center
        cls.depth = 4  # gives a mean area of 20 deg^2 per pixel, roughly matching a 3 deg search radius

        config = IndexerRegistry['HTM'].ConfigClass()
        # Match on disk comparison file
        config.depth = cls.depth
        cls.indexer = IndexerRegistry['HTM'](config)
        for ra in cls.testRas:
            for dec in cls.testDecs:
                tupl = (ra, dec)
                cent = make_coord(*tupl)
                cls.compCats[tupl] = []
                for rec in cls.skyCatalog:
                    if make_coord(rec['ra_icrs'], rec['dec_icrs']).separation(
                            cent) < cls.searchRadius:
                        cls.compCats[tupl].append(rec['id'])

        cls.testRepoPath = cls.outPath + "/test_repo"
        config = cls.makeConfig(withMagErr=True,
                                withRaDecErr=True,
                                withPm=True,
                                withPmErr=True)
        # To match on disk test data
        config.dataset_config.indexer.active.depth = cls.depth
        config.id_name = 'id'
        config.pm_scale = 1000.0  # arcsec/yr --> mas/yr
        IngestIndexedReferenceTask.parseAndRun(
            args=[INPUT_DIR, "--output", cls.testRepoPath, cls.skyCatalogFile],
            config=config)
        cls.defaultDatasetName = config.dataset_config.ref_dataset_name
        cls.testDatasetName = 'diff_ref_name'
        cls.testButler = dafPersist.Butler(cls.testRepoPath)
        os.symlink(
            os.path.join(cls.testRepoPath, 'ref_cats', cls.defaultDatasetName),
            os.path.join(cls.testRepoPath, 'ref_cats', cls.testDatasetName))
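        # The symlink publishes the ingested shards under a second dataset
        # name, so the "load with a different name" tests work without a
        # second ingest. Sanity check (illustrative, not in the original):
        assert os.path.islink(os.path.join(cls.testRepoPath, 'ref_cats', cls.testDatasetName))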
Example 5
    def runTest(withRaDecErr):
        outputPath = os.path.join(self.outPath, "output_setsVersion"
                                  + ("_withRaDecErr" if withRaDecErr else ""))
        # Test with multiple files and standard config
        config = makeIngestIndexConfig(withRaDecErr=withRaDecErr, withMagErr=True,
                                       withPm=True, withPmErr=True)
        # don't use the default depth, to avoid taking the time to create thousands of file locks
        config.dataset_config.indexer.active.depth = self.depth
        IngestIndexedReferenceTask.parseAndRun(
            args=[self.input_dir, "--output", outputPath, self.skyCatalogFile],
            config=config)
        # A newly-ingested refcat should be marked format_version=1.
        loader = LoadIndexedReferenceObjectsTask(butler=dafPersist.Butler(outputPath))
        self.assertEqual(loader.dataset_config.format_version, 1)
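    # The enclosing test presumably calls the closure for both configurations
    # (hypothetical driver; the original snippet omits it):
    runTest(True)
    runTest(False)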
    def setUpClass(cls):
        cls.out_path = tempfile.mkdtemp()
        cls.test_cat_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "data",
            "testHtmIndex.fits")
        cls.test_cat = afwTable.SourceCatalog.readFits(cls.test_cat_path)
        ret = cls.make_sky_catalog(cls.out_path)
        cls.sky_catalog_file, cls.sky_catalog_file_delim, cls.sky_catalog = ret
        cls.test_ras = [210., 14.5, 93., 180., 286., 0.]
        cls.test_decs = [-90., -51., -30.1, 0., 27.3, 62., 90.]
        cls.search_radius = 3. * afwGeom.degrees
        cls.comp_cats = {}  # dict of center coord: list of IDs of stars within cls.search_radius of center
        cls.depth = 4  # gives a mean area of 20 deg^2 per pixel, roughly matching a 3 deg search radius
        config = IndexerRegistry['HTM'].ConfigClass()
        # Match on disk comparison file
        config.depth = cls.depth
        cls.indexer = IndexerRegistry['HTM'](config)
        for ra in cls.test_ras:
            for dec in cls.test_decs:
                tupl = (ra, dec)
                cent = make_coord(*tupl)
                cls.comp_cats[tupl] = []
                for rec in cls.sky_catalog:
                    if make_coord(rec['ra_icrs'], rec['dec_icrs']).separation(
                            cent) < cls.search_radius:
                        cls.comp_cats[tupl].append(rec['id'])

        cls.test_repo_path = cls.out_path + "/test_repo"
        config = IngestIndexedReferenceTask.ConfigClass()
        # To match on disk test data
        config.dataset_config.indexer.active.depth = cls.depth
        config.ra_name = 'ra_icrs'
        config.dec_name = 'dec_icrs'
        config.mag_column_list = ['a', 'b']
        config.id_name = 'id'
        config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'}
        IngestIndexedReferenceTask.parseAndRun(args=[
            input_dir, "--output", cls.test_repo_path, cls.sky_catalog_file
        ],
                                               config=config)
        cls.default_dataset_name = config.dataset_config.ref_dataset_name
        cls.test_dataset_name = 'diff_ref_name'
        cls.test_butler = dafPersist.Butler(cls.test_repo_path)
        os.symlink(
            os.path.join(cls.test_repo_path, 'ref_cats',
                         cls.default_dataset_name),
            os.path.join(cls.test_repo_path, 'ref_cats',
                         cls.test_dataset_name))
Example 7
    def testIngestSetsVersion(self):
        """Test that newly ingested catalogs get the correct version number set.
        """
        # Test with multiple files and standard config
        config = self.makeConfig(withRaDecErr=True, withMagErr=True, withPm=True, withPmErr=True)
        # don't use the default depth, to avoid taking the time to create thousands of file locks
        config.dataset_config.indexer.active.depth = self.depth
        IngestIndexedReferenceTask.parseAndRun(
            args=[self.input_dir, "--output", self.outPath + "/output_setsVersion",
                  self.skyCatalogFile],
            config=config)
        # A newly-ingested refcat should be marked format_version=1.
        loader = LoadIndexedReferenceObjectsTask(butler=dafPersist.Butler(
            self.outPath + "/output_setsVersion"))
        self.assertEqual(loader.dataset_config.format_version, 1)
    def testAgainstPersisted(self):
        shardId = 2222
        dataset_name = (IngestIndexedReferenceTask.ConfigClass()
                        .dataset_config.ref_dataset_name)
        dataId = self.indexer.makeDataId(shardId, dataset_name)
        self.assertTrue(self.testButler.datasetExists('ref_cat', dataId))
        refCat = self.testButler.get('ref_cat', dataId)
        if REGENERATE_COMPARISON:
            if os.path.exists(self.testCatPath):
                os.unlink(self.testCatPath)
            refCat.writeFits(self.testCatPath)
            self.fail(
                "New comparison data written; unset REGENERATE_COMPARISON in order to proceed"
            )
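        # Note: the REGENERATE_COMPARISON branch deliberately fails after
        # rewriting the comparison file, so freshly written data can never
        # silently pass the test it was meant to check.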

        ex1 = refCat.extract('*')
        testCat = afwTable.SimpleCatalog.readFits(self.testCatPath)

        ex2 = testCat.extract('*')
        self.assertEqual(set(ex1.keys()), set(ex2.keys()))
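        # assert_array_almost_equal tolerates tiny floating-point differences
        # from the FITS round-trip; a later variant of this test uses the
        # stricter assert_array_equal.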
        for kk in ex1:
            np.testing.assert_array_almost_equal(
                ex1[kk],
                ex2[kk],
            )
Example 9
    def setUpClass(cls):
        cls.outPath = tempfile.mkdtemp()
        cls.testCatPath = os.path.join(os.path.dirname(os.path.realpath(__file__)), "data",
                                       "testHtmIndex.fits")
        # arbitrary, but reasonable, amount of proper motion (angle/year)
        # and direction of proper motion
        cls.properMotionAmt = 3.0*lsst.geom.arcseconds
        cls.properMotionDir = 45*lsst.geom.degrees
        cls.properMotionErr = 1e-3*lsst.geom.arcseconds
        cls.epoch = astropy.time.Time(58206.861330339219, scale="tai", format="mjd")
        ret = cls.make_skyCatalog(cls.outPath)
        cls.skyCatalogFile, cls.skyCatalogFileDelim, cls.skyCatalog = ret
        cls.testRas = [210., 14.5, 93., 180., 286., 0.]
        cls.testDecs = [-90., -51., -30.1, 0., 27.3, 62., 90.]
        cls.searchRadius = 3. * lsst.geom.degrees
        cls.compCats = {}  # dict of center coord: list of IDs of stars within cls.searchRadius of center
        cls.depth = 4  # gives a mean area of 20 deg^2 per pixel, roughly matching a 3 deg search radius

        config = IndexerRegistry['HTM'].ConfigClass()
        # Match on disk comparison file
        config.depth = cls.depth
        cls.indexer = IndexerRegistry['HTM'](config)
        for ra in cls.testRas:
            for dec in cls.testDecs:
                tupl = (ra, dec)
                cent = make_coord(*tupl)
                cls.compCats[tupl] = []
                for rec in cls.skyCatalog:
                    if make_coord(rec['ra_icrs'], rec['dec_icrs']).separation(cent) < cls.searchRadius:
                        cls.compCats[tupl].append(rec['id'])

        cls.testRepoPath = cls.outPath+"/test_repo"
        config = cls.makeConfig(withMagErr=True, withRaDecErr=True, withPm=True, withPmErr=True)
        # To match on disk test data
        config.dataset_config.indexer.active.depth = cls.depth
        config.id_name = 'id'
        config.pm_scale = 1000.0  # arcsec/yr --> mas/yr
        IngestIndexedReferenceTask.parseAndRun(args=[INPUT_DIR, "--output", cls.testRepoPath,
                                                     cls.skyCatalogFile], config=config)
        cls.defaultDatasetName = config.dataset_config.ref_dataset_name
        cls.testDatasetName = 'diff_ref_name'
        cls.testButler = dafPersist.Butler(cls.testRepoPath)
        os.symlink(os.path.join(cls.testRepoPath, 'ref_cats', cls.defaultDatasetName),
                   os.path.join(cls.testRepoPath, 'ref_cats', cls.testDatasetName))
    def testAgainstPersisted(self):
        pix_id = 671901
        dataset_name = (IngestIndexedReferenceTask.ConfigClass()
                        .dataset_config.ref_dataset_name)
        data_id = self.indexer.make_data_id(pix_id, dataset_name)
        self.assertTrue(self.test_butler.datasetExists('ref_cat', data_id))
        ref_cat = self.test_butler.get('ref_cat', data_id)
        ex1 = ref_cat.extract('*')
        ex2 = self.test_cat.extract('*')
        # extract('*') returns a dict, so this comparison is insensitive to column order
        self.assertDictEqual(ex1, ex2)
Example 11
    def testIngestConfigOverrides(self):
        """Test IngestIndexedReferenceTask with different configs.
        """
        config2 = makeIngestIndexConfig(withRaDecErr=True, withMagErr=True, withPm=True, withPmErr=True,
                                        withParallax=True)
        config2.ra_name = "ra"
        config2.dec_name = "dec"
        config2.dataset_config.ref_dataset_name = 'myrefcat'
        # Change the indexing depth to prove we can.
        # Smaller is better than larger because it makes fewer files.
        config2.dataset_config.indexer.active.depth = self.depth - 1
        config2.is_photometric_name = 'is_phot'
        config2.is_resolved_name = 'is_res'
        config2.is_variable_name = 'is_var'
        config2.id_name = 'id'
        config2.extra_col_names = ['val1', 'val2', 'val3']
        config2.file_reader.header_lines = 1
        config2.file_reader.colnames = [
            'id', 'ra', 'dec', 'ra_err', 'dec_err', 'a', 'a_err', 'b', 'b_err', 'is_phot',
            'is_res', 'is_var', 'val1', 'val2', 'val3', 'pm_ra', 'pm_dec', 'pm_ra_err',
            'pm_dec_err', 'parallax', 'parallax_err', 'unixtime',
        ]
        config2.file_reader.delimiter = '|'
        # this also tests changing the delimiter
        IngestIndexedReferenceTask.parseAndRun(
            args=[self.input_dir, "--output", self.outPath+"/output_override",
                  self.skyCatalogFileDelim], config=config2)

        # Test if we can get back the catalog with a non-standard dataset name
        butler = dafPersist.Butler(self.outPath+"/output_override")
        loaderConfig = LoadIndexedReferenceObjectsConfig()
        loaderConfig.ref_dataset_name = "myrefcat"
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=loaderConfig)
        self.checkAllRowsInRefcat(loader, self.skyCatalog, config2)

        # test that a catalog can be loaded even with a name not used for ingestion
        butler = dafPersist.Butler(self.testRepoPath)
        loaderConfig2 = LoadIndexedReferenceObjectsConfig()
        loaderConfig2.ref_dataset_name = self.testDatasetName
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=loaderConfig2)
        self.checkAllRowsInRefcat(loader, self.skyCatalog, config2)
    def makeConfig(withMagErr=False,
                   withRaDecErr=False,
                   withPm=False,
                   withPmErr=False,
                   withParallax=False,
                   withParallaxErr=False):
        """Make a config for IngestIndexedReferenceTask

        This is primarily intended to simplify tests of config validation,
        so fields that are not validated are not set.
        However, it can also be used to reduce boilerplate in other tests.
        """
        config = IngestIndexedReferenceTask.ConfigClass()
        config.pm_scale = 1000.0
        config.ra_name = 'ra_icrs'
        config.dec_name = 'dec_icrs'
        config.mag_column_list = ['a', 'b']

        if withMagErr:
            config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'}

        if withRaDecErr:
            config.ra_err_name = "ra_err"
            config.dec_err_name = "dec_err"

        if withPm:
            config.pm_ra_name = "pm_ra"
            config.pm_dec_name = "pm_dec"

        if withPmErr:
            config.pm_ra_err_name = "pm_ra_err"
            config.pm_dec_err_name = "pm_dec_err"

        if withParallax:
            config.parallax_name = "parallax"

        if withParallaxErr:
            config.parallax_err_name = "parallax_err"

        if withPm or withParallax:
            config.epoch_name = "unixtime"
            config.epoch_format = "unix"
            config.epoch_scale = "utc"

        return config
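    # Usage sketch (not in the original): the config from makeConfig is meant
    # to pass pex_config validation once the pm/parallax groups are complete:
    #     config = makeConfig(withPm=True, withPmErr=True)
    #     config.validate()  # raises ValueError if a group is half-specified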
    def testAgainstPersisted(self):
        pix_id = 2222
        dataset_name = (IngestIndexedReferenceTask.ConfigClass()
                        .dataset_config.ref_dataset_name)
        data_id = self.indexer.make_data_id(pix_id, dataset_name)
        self.assertTrue(self.test_butler.datasetExists('ref_cat', data_id))
        ref_cat = self.test_butler.get('ref_cat', data_id)
        if REGENERATE_COMPARISON:
            os.unlink(self.test_cat_path)
            ref_cat.writeFits(self.test_cat_path)
            self.fail(
                "New comparison data written; unset REGENERATE_COMPARISON in order to proceed"
            )

        ex1 = ref_cat.extract('*')
        ex2 = self.test_cat.extract('*')
        self.assertEqual(set(ex1.keys()), set(ex2.keys()))
        for kk in ex1:
            np.testing.assert_array_equal(ex1[kk], ex2[kk])
Example 14
#!/usr/bin/env python
from lsst.meas.algorithms import IngestIndexedReferenceTask
IngestIndexedReferenceTask.parseAndRun()
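# Installed as a command-line script, parseAndRun() reads argv directly, so a
# hypothetical invocation looks like (script name and paths illustrative only):
#   ingestIndexReferenceTask.py path/to/butler_repo --output path/to/out_repo ref.txt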
Example 15
    def testIngest(self):
        """Test IngestIndexedReferenceTask."""
        # Test with multiple files and standard config
        config = self.makeConfig(withRaDecErr=True,
                                 withMagErr=True,
                                 withPm=True,
                                 withPmErr=True)
        IngestIndexedReferenceTask.parseAndRun(args=[
            INPUT_DIR, "--output", self.outPath + "/output_multifile",
            self.skyCatalogFile, self.skyCatalogFile
        ],
                                               config=config)

        # Test with config overrides
        config2 = self.makeConfig(withRaDecErr=True,
                                  withMagErr=True,
                                  withPm=True,
                                  withPmErr=True)
        config2.ra_name = "ra"
        config2.dec_name = "dec"
        config2.dataset_config.ref_dataset_name = 'myrefcat'
        # Change the indexing depth to prove we can.
        # Smaller is better than larger because it makes fewer files.
        config2.dataset_config.indexer.active.depth = self.depth - 1
        config2.is_photometric_name = 'is_phot'
        config2.is_resolved_name = 'is_res'
        config2.is_variable_name = 'is_var'
        config2.id_name = 'id'
        config2.extra_col_names = ['val1', 'val2', 'val3']
        config2.file_reader.header_lines = 1
        config2.file_reader.colnames = [
            'id',
            'ra',
            'dec',
            'ra_err',
            'dec_err',
            'a',
            'a_err',
            'b',
            'b_err',
            'is_phot',
            'is_res',
            'is_var',
            'val1',
            'val2',
            'val3',
            'pm_ra',
            'pm_dec',
            'pm_ra_err',
            'pm_dec_err',
            'unixtime',
        ]
        config2.file_reader.delimiter = '|'
        # this also tests changing the delimiter
        IngestIndexedReferenceTask.parseAndRun(args=[
            INPUT_DIR, "--output", self.outPath + "/output_override",
            self.skyCatalogFileDelim
        ],
                                               config=config2)

        # This location is known to have objects
        cent = make_coord(93.0, -90.0)

        # Test if we can get back the catalog with a non-standard dataset name
        butler = dafPersist.Butler(self.outPath + "/output_override")
        loaderConfig = LoadIndexedReferenceObjectsConfig()
        loaderConfig.ref_dataset_name = "myrefcat"
        loader = LoadIndexedReferenceObjectsTask(butler=butler,
                                                 config=loaderConfig)
        cat = loader.loadSkyCircle(cent, self.searchRadius,
                                   filterName='a').refCat
        self.assertTrue(len(cat) > 0)
        self.assertTrue(cat.isContiguous())

        # test that a catalog can be loaded even with a name not used for ingestion
        butler = dafPersist.Butler(self.testRepoPath)
        loaderConfig2 = LoadIndexedReferenceObjectsConfig()
        loaderConfig2.ref_dataset_name = self.testDatasetName
        loader = LoadIndexedReferenceObjectsTask(butler=butler,
                                                 config=loaderConfig2)
        cat = loader.loadSkyCircle(cent, self.searchRadius,
                                   filterName='a').refCat
        self.assertTrue(len(cat) > 0)
        self.assertTrue(cat.isContiguous())
    def testIngest(self):
        """Test IngestIndexedReferenceTask."""
        default_config = IngestIndexedReferenceTask.ConfigClass()
        # test ingest with default config
        # This should raise since I haven't specified the ra/dec/mag columns.
        with self.assertRaises(ValueError):
            IngestIndexedReferenceTask.parseAndRun(args=[
                input_dir, "--output", self.out_path + "/output",
                self.sky_catalog_file
            ],
                                                   config=default_config)
        # test with ~minimum config.  Mag errors are not technically necessary, but might as well test here
        default_config.ra_name = 'ra_icrs'
        default_config.dec_name = 'dec_icrs'
        default_config.mag_column_list = ['a', 'b']
        default_config.mag_err_column_map = {'a': 'a_err'}
        # should raise since all columns need an error column if any do
        with self.assertRaises(ValueError):
            IngestIndexedReferenceTask.parseAndRun(args=[
                input_dir, "--output", self.out_path + "/output",
                self.sky_catalog_file
            ],
                                                   config=default_config)
        # test with multiple files and correct config
        default_config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'}
        IngestIndexedReferenceTask.parseAndRun(args=[
            input_dir, "--output", self.out_path + "/output_multifile",
            self.sky_catalog_file, self.sky_catalog_file
        ],
                                               config=default_config)
        # test with config overrides
        default_config = IngestIndexedReferenceTask.ConfigClass()
        default_config.ra_name = 'ra'
        default_config.dec_name = 'dec'
        default_config.mag_column_list = ['a', 'b']
        default_config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'}
        default_config.dataset_config.ref_dataset_name = 'myrefcat'
        default_config.dataset_config.indexer.active.depth = 10
        default_config.is_photometric_name = 'is_phot'
        default_config.is_resolved_name = 'is_res'
        default_config.is_variable_name = 'is_var'
        default_config.id_name = 'id'
        default_config.extra_col_names = ['val1', 'val2', 'val3']
        default_config.file_reader.header_lines = 1
        default_config.file_reader.colnames = [
            'id', 'ra', 'dec', 'a', 'a_err', 'b', 'b_err', 'is_phot', 'is_res',
            'is_var', 'val1', 'val2', 'val3'
        ]
        default_config.file_reader.delimiter = '|'
        # this also tests changing the delimiter
        IngestIndexedReferenceTask.parseAndRun(args=[
            input_dir, "--output", self.out_path + "/output_override",
            self.sky_catalog_file_delim
        ],
                                               config=default_config)

        # This location is known to have objects
        cent = make_coord(93.0, -90.0)

        # Test if we can get back the catalog with a non-standard dataset name
        butler = dafPersist.Butler(self.out_path + "/output_override")
        config = LoadIndexedReferenceObjectsConfig()
        config.ref_dataset_name = "myrefcat"
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=config)
        cat = loader.loadSkyCircle(cent, self.search_radius, filterName='a')
        self.assertTrue(len(cat) > 0)

        # test that a catalog can be loaded even with a name not used for ingestion
        butler = dafPersist.Butler(self.test_repo_path)
        config = LoadIndexedReferenceObjectsConfig()
        config.ref_dataset_name = self.test_dataset_name
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=config)
        cat = loader.loadSkyCircle(cent, self.search_radius, filterName='a')
        self.assertTrue(len(cat) > 0)
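        # Both assertRaises cases above trip IngestIndexedReferenceTask's
        # config validation; the same check can be provoked without a parser
        # run (sketch, assuming the config's validate() enforces it):
        #     bad = IngestIndexedReferenceTask.ConfigClass()
        #     bad.ra_name, bad.dec_name = 'ra_icrs', 'dec_icrs'
        #     bad.mag_column_list = ['a', 'b']
        #     bad.mag_err_column_map = {'a': 'a_err'}  # 'b' missing
        #     bad.validate()  # expected to raise ValueError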
    def testIngest(self):
        """Test IngestIndexedReferenceTask with different configs."""
        # Test with multiple files and standard config
        config = self.makeConfig(withRaDecErr=True,
                                 withMagErr=True,
                                 withPm=True,
                                 withPmErr=True)
        # don't use the default depth, to avoid taking the time to create thousands of file locks
        config.dataset_config.indexer.active.depth = self.depth
        IngestIndexedReferenceTask.parseAndRun(args=[
            self.input_dir, "--output", self.outPath + "/output_multifile",
            self.skyCatalogFile, self.skyCatalogFile
        ],
                                               config=config)
        # A newly-ingested refcat should be marked format_version=1.
        loader = LoadIndexedReferenceObjectsTask(
            butler=dafPersist.Butler(self.outPath + "/output_multifile"))
        self.assertEqual(loader.dataset_config.format_version, 1)

        # Test with config overrides
        config2 = self.makeConfig(withRaDecErr=True,
                                  withMagErr=True,
                                  withPm=True,
                                  withPmErr=True)
        config2.ra_name = "ra"
        config2.dec_name = "dec"
        config2.dataset_config.ref_dataset_name = 'myrefcat'
        # Change the indexing depth to prove we can.
        # Smaller is better than larger because it makes fewer files.
        config2.dataset_config.indexer.active.depth = self.depth - 1
        config2.is_photometric_name = 'is_phot'
        config2.is_resolved_name = 'is_res'
        config2.is_variable_name = 'is_var'
        config2.id_name = 'id'
        config2.extra_col_names = ['val1', 'val2', 'val3']
        config2.file_reader.header_lines = 1
        config2.file_reader.colnames = [
            'id',
            'ra',
            'dec',
            'ra_err',
            'dec_err',
            'a',
            'a_err',
            'b',
            'b_err',
            'is_phot',
            'is_res',
            'is_var',
            'val1',
            'val2',
            'val3',
            'pm_ra',
            'pm_dec',
            'pm_ra_err',
            'pm_dec_err',
            'unixtime',
        ]
        config2.file_reader.delimiter = '|'
        # this also tests changing the delimiter
        IngestIndexedReferenceTask.parseAndRun(args=[
            self.input_dir, "--output", self.outPath + "/output_override",
            self.skyCatalogFileDelim
        ],
                                               config=config2)

        # Test if we can get back the catalog with a non-standard dataset name
        butler = dafPersist.Butler(self.outPath + "/output_override")
        loaderConfig = LoadIndexedReferenceObjectsConfig()
        loaderConfig.ref_dataset_name = "myrefcat"
        loader = LoadIndexedReferenceObjectsTask(butler=butler,
                                                 config=loaderConfig)
        self.checkAllRowsInRefcat(loader, self.skyCatalog)

        # test that a catalog can be loaded even with a name not used for ingestion
        butler = dafPersist.Butler(self.testRepoPath)
        loaderConfig2 = LoadIndexedReferenceObjectsConfig()
        loaderConfig2.ref_dataset_name = self.testDatasetName
        loader = LoadIndexedReferenceObjectsTask(butler=butler,
                                                 config=loaderConfig2)
        self.checkAllRowsInRefcat(loader, self.skyCatalog)
                          "%i", "%.15g", "%.15g", "%.15g", "%.15g", "%.15g",
                          "%.15g", "%.15g", "%.15g", "%i", "%i", "%i", "%.15g",
                          "%.15g", "%.15g", "%.15g", "%.15g"
                      ])

    np.savetxt(outPath + "/ref.txt", arr, delimiter=",", **saveKwargs)
    return outPath + "/ref.txt"


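# Standalone data-generation script: builds a reference-catalog repository for
# the current format version under tests/data, presumably as the on-disk
# comparison data consumed by the tests above.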
inPath = os.path.join(lsst.utils.getPackageDir('meas_algorithms'),
                      'tests/data')
outPath = os.path.join(inPath, "version%s" % LATEST_FORMAT_VERSION)

skyCatalogFile = make_skyCatalog(inPath)

config = IngestIndexedReferenceTask.ConfigClass()
config.ra_name = 'ra_icrs'
config.dec_name = 'dec_icrs'
config.mag_column_list = ['a', 'b']
config.mag_err_column_map = {'a': 'a_err', 'b': 'b_err'}
config.dataset_config.indexer.active.depth = 4

OBS_TEST_DIR = lsst.utils.getPackageDir('obs_test')
obsTestPath = os.path.join(OBS_TEST_DIR, "data", "input")
IngestIndexedReferenceTask.parseAndRun(
    args=[obsTestPath, "--output", outPath, skyCatalogFile], config=config)

# cleanup files and make the mapper independent of obs_test's path
os.remove(skyCatalogFile)
os.remove(os.path.join(outPath, 'repositoryCfg.yaml'))
with open(os.path.join(outPath, '_mapper'), 'w') as mapperFile:
    # Write the obs_test mapper's fully qualified name; a plain _mapper file
    # replaces the removed repositoryCfg so the repo no longer hard-codes
    # obs_test's install path.
    mapperFile.write('lsst.obs.test.TestMapper\n')