def test_muselet_full(tmpdir, minicube):
    """test MUSELET"""
    outdir = str(tmpdir)
    print('Working directory:', outdir)
    muselet(minicube.filename, write_nbcube=True, cleanup=True,
            workdir=outdir, n_cpu=2)

    # NB cube produced?
    assert os.path.isfile(
        str(tmpdir.join('NB_' + os.path.basename(minicube.filename))))

    # get catalogs
    cat_lines = Catalog.read(str(tmpdir.join('lines.fit')))
    cat_objects = Catalog.read(str(tmpdir.join('objects.fit')))
    files_lines = glob(str(tmpdir.join('lines/*')))
    files_objects = glob(str(tmpdir.join('objects/*')))

    # check same length as number of sources
    assert len(cat_lines) == len(files_lines)
    assert len(cat_objects) == len(files_objects)
    assert len(cat_lines) == 61
    assert len(cat_objects) == 15
def test_muselet_fast(tmpdir, minicube):
    """test MUSELET"""
    outdir = str(tmpdir)
    filename = str(tmpdir.join('cube.fits'))
    cube = minicube[1800:2000, :, :]
    cube.write(filename, savemask='nan')
    print('Working directory:', outdir)
    muselet(filename, write_nbcube=True, cleanup=True, workdir=outdir)

    # NB cube produced?
    assert os.path.isfile(str(tmpdir.join('NB_cube.fits')))

    # get catalogs
    cat_lines = Catalog.read(str(tmpdir.join('lines.fit')))
    cat_objects = Catalog.read(str(tmpdir.join('objects.fit')))
    files_lines = glob(str(tmpdir.join('lines/*')))
    files_objects = glob(str(tmpdir.join('objects/*')))

    # check same length as number of sources
    assert len(cat_lines) == len(files_lines)
    assert len(cat_objects) == len(files_objects)
    assert len(cat_lines) == 34
    assert len(cat_objects) == 12
def test_tods9(tmpdir):
    cat = Catalog.read(get_data_file('sdetect', 'cat.txt'), format='ascii')
    regfile = str(tmpdir.join('test.reg'))
    cat.to_ds9_regions(regfile)
    with open(regfile) as f:
        assert f.readlines()[:4] == [
            '# Region file format: DS9 astropy/regions\n',
            'fk5\n',
            'circle(63.356106,10.466166,0.000278)\n',
            'circle(63.355404,10.464703,0.000278)\n',
        ]
def test_from_path(source1, source2, tmpdir):
    with pytest.raises(IOError):
        Catalog.from_path('/not/a/valid/path')

    source1.write(str(tmpdir.join('source1.fits')))
    source2.write(str(tmpdir.join('source2.fits')))
    cat = Catalog.from_path(str(tmpdir))
    assert len(cat) == 2
    # 2 additional columns vs from_sources: FILENAME is added by from_path,
    # and SOURCE_V was added by Source.write
    assert len(cat.colnames) == 47

    for name in ('cat.fits', 'cat.csv'):
        filename = str(tmpdir.join(name))
        cat.write(filename)
        c = Catalog.read(filename)
        assert c.colnames == cat.colnames
        assert len(c) == 2
        assert isinstance(c, Catalog)
def test_select(minicube):
    cat = Catalog.read(get_data_file('sdetect', 'cat.txt'), format='ascii')
    im = minicube.mean(axis=0)

    # Note: im.shape is (40, 40) and cat has 8 rows, all inside the image
    assert len(cat) == 8

    # all sources are in the image
    assert len(cat.select(im.wcs, margin=0)) == 8

    # using a margin removes the sources close to the edges
    assert len(cat.select(im.wcs, margin=5)) == 4

    # Create a mask with the bottom and left edges masked
    mask = np.ones(im.shape, dtype=bool)
    mask[5:, 5:] = False

    # using a mask removes the sources on the masked edges
    assert len(cat.select(im.wcs, mask=mask)) == 4
    assert len(cat.select(im.wcs, margin=1, mask=mask)) == 4
def __init__(self, cube=None, cubeProcessed=None, catalog=None,
             listSources=None, listID=None, params=None):
    """Several input choices are available:

    * List of MUSE Sources: these sources need a MUSE_CUBE in their
      cubes extension.
    * Complete cube with a MUSE catalog -> all Lyman-alpha emitters
      will be processed.
    * Complete cube, catalog and listID -> all the sources of the ID
      list will be processed.

    An already preprocessed cubeProcessed can also be passed along.
    A usage sketch is given after this method.

    Param: Cube object *cube*, MUSE datacube (optional if listSources
        is defined)
    Param: Cube object *cubeProcessed*, preprocessed datacube
    Param: String *catalog*, filename of a MUSE catalog
    Param: list of Source objects *listSources* (optional)
    Param: list of source IDs *listID*, list of sources to extract
        from the cube using the catalog
    Param: Params object *params*, parameters for the method; if not
        defined, default parameters are used
    """
    self.params = params if params is not None else parameters.Params()
    self.cube = cube
    self.cubeProcessed = cubeProcessed
    self.catalog = Catalog.read(catalog) if catalog is not None else None

    if listSources is not None:
        self.listSources = listSources
    else:
        if listID is None:
            # We get all Lyman-alpha emitters with a defined redshift
            hdulist = pyfits.open(catalog)
            listID = [row[0] for row in hdulist[1].data
                      if row[1] == 'Lya' and row[4] > 0]
        self.listSources = [self.sourceFromCatalog(k) for k in listID]

    self.listCorrArr = []
    self.listPvalMap = []
    self.preprocessing = None
    self.postprocessing = None
    self.paramsPreProcess = parameters.ParamsPreProcess()
    self.paramsPostProcess = parameters.ParamsPostProcess()
    self.paramsDetection = parameters.ParamsDetection()
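# A minimal usage sketch of the three input modes described in the docstring
# above. The enclosing class name is not shown in this excerpt, so
# `Detection` below is a hypothetical placeholder, as are `my_sources`,
# `my_cube` and 'catalog.fits'; only the keyword arguments come from the
# signature above.
#
#     # 1) from a list of MUSE Source objects (each with a MUSE_CUBE extension)
#     det = Detection(listSources=my_sources)
#
#     # 2) from a cube and a MUSE catalog: all Lya emitters are processed
#     det = Detection(cube=my_cube, catalog='catalog.fits')
#
#     # 3) from a cube, a catalog and an explicit list of source IDs
#     det = Detection(cube=my_cube, catalog='catalog.fits', listID=[1, 4, 7])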
def test_origin(caplog, tmpdir):
    """Test the full ORIGIN process."""
    orig = ORIGIN.init(MINICUBE, name='tmp', loglevel='INFO',
                       path=str(tmpdir))
    orig.write()

    origfolder = str(tmpdir.join('tmp'))

    # test that log level is correctly reloaded, then change it
    orig = ORIGIN.load(origfolder)
    assert orig.logger.handlers[0].level == 20
    orig.set_loglevel('DEBUG')
    assert orig.logger.handlers[0].level == 10

    orig.step01_preprocessing()
    assert orig.ima_dct is not None
    assert orig.ima_std is not None
    orig.write()

    orig = ORIGIN.load(origfolder)
    orig.step02_areas(minsize=30, maxsize=60)
    assert orig.param['nbareas'] == 4
    assert list(np.unique(orig.areamap._data)) == [1, 2, 3, 4]
    orig.write()

    orig = ORIGIN.load(origfolder)
    assert orig.param['nbareas'] == 4
    orig.step03_compute_PCA_threshold()
    orig.step04_compute_greedy_PCA()

    # TGLR computing (normalized correlations)
    orig.step05_compute_TGLR(ncpu=1)
    # orig.step05_compute_TGLR(ncpu=1, NbSubcube=2)

    # threshold applied on pvalues
    orig.step06_compute_purity_threshold(purity=0.8)

    # FIXME: threshold is hardcoded for now
    orig.step07_detection(threshold=9.28, segmap=SEGMAP)

    # estimation
    orig.step08_compute_spectra()
    orig.write()

    cat = Catalog.read(str(tmpdir.join('tmp', 'Cat1.fits')))
    subcat = cat[cat['comp'] == 0]
    assert np.all(np.isnan(np.array(subcat['STD'])))
    # Test that the columns mask is correct. To be tested when we switch
    # back to a masked table
    # assert np.all(subcat['T_GLR'].mask == False)
    # assert np.all(subcat['STD'].mask == True)

    # cleaned results
    orig = ORIGIN.load(origfolder, newname='tmp2')
    orig.step09_clean_results()
    orig.write()

    # check that the catalog version was saved
    assert "CAT3_TS" in Catalog.read(
        str(tmpdir.join('tmp2', 'Cat3_lines.fits'))).meta
    assert "CAT3_TS" in Catalog.read(
        str(tmpdir.join('tmp2', 'Cat3_sources.fits'))).meta

    # create masks
    origfolder2 = str(tmpdir.join('tmp2'))
    orig = ORIGIN.load(origfolder2)
    orig.step10_create_masks()
    orig.write()

    # list of source objects
    orig = ORIGIN.load(origfolder2)
    orig.step11_save_sources("0.1")
    orig.step11_save_sources("0.1", n_jobs=2, overwrite=True)

    orig.info()
    with open(orig.logfile) as f:
        log = f.read().splitlines()
        assert '11 Done' in log[-1]

    tbl = orig.timestat(table=True)
    assert len(tbl) == 12
    assert tbl.colnames == ['Step', 'Exec Date', 'Exec Time']

    caplog.clear()
    orig.timestat()
    rec = caplog.records
    assert rec[0].message.startswith('step01_preprocessing executed:')
    assert rec[10].message.startswith('step11_save_sources executed:')
    assert rec[11].message.startswith('*** Total run time:')

    caplog.clear()
    orig.stat()
    assert [rec.message for rec in caplog.records] == [
        'ORIGIN PCA pfa 0.01 Back Purity: 0.80 Threshold: 9.28 '
        'Bright Purity 0.80 Threshold 5.46',
        'Nb of detected lines: 16',
        'Nb of sources Total: 6 Background: 3 Cont: 3',
        'Nb of sources detected in faint (after PCA): 4 in std (before PCA): 2',
    ]

    cat = Catalog.read(str(tmpdir.join('tmp2', 'Cat3_lines.fits')))
    assert len(cat) == 16
    assert max(cat['ID']) == 6

    # test that the returned sources are valid
    src1 = Source.from_file(
        str(tmpdir.join('tmp2', 'sources', 'source-00001.fits')))
    src2 = Source.from_file(
        str(tmpdir.join('tmp2', 'sources', 'source-00002.fits')))
    # FIXME: check if this test is really useful
    # assert set(sp.shape[0] for sp in src.spectra.values()) == {22, 1100}
    assert {ima.shape for ima in src1.images.values()} == {(25, 25)}
    assert src1.cubes['MUSE_CUBE'].shape == (1100, 25, 25)
    assert "SRC_TS" in src1.header
    assert src1.header["CAT3_TS"] == src2.header["CAT3_TS"]
    assert src1.header["SRC_TS"] == src2.header["SRC_TS"]

    # Cleanup (try to close opened files to avoid warnings)
    for h in orig.logger.handlers:
        h.close()
def test_edgedist(minicube):
    cat = Catalog.read(get_data_file('sdetect', 'cat.txt'), format='ascii')
    im = minicube.mean(axis=0)
    ref = [2.29, 0.43, 2.83, 0.19, 2.70, 0.16, 0.11, 1.51]
    assert_almost_equal(cat.edgedist(im.wcs), ref, decimal=2)