Example #1
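The snippets below are test functions excerpted without their module headers. A minimal import block that would cover the names they use is sketched here; the mpdaf module paths are assumptions, not copied from the original test modules, and fixture names such as minicube, source1, source2 and the get_data_file helper are local to the original test suite and not shown in this listing.

# Presumed imports for the excerpted tests below; the mpdaf paths are
# assumptions. ORIGIN is distributed separately (the muse_origin package
# in recent releases), so its import is omitted here.
import os
from glob import glob

import numpy as np
import pytest
from astropy.coordinates import SkyCoord
from astropy.io import fits as pyfits
from astropy.table import Table
from numpy.testing import assert_almost_equal, assert_array_equal

from mpdaf.sdetect import Catalog, Source, muselet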
def test_match3Dline():
    c1 = Catalog()
    c1['RA'] = np.arange(10, dtype=float)
    c1['DEC'] = np.arange(10, dtype=float)
    c1['LBDA'] = np.arange(10, dtype=float)

    c2 = Table()
    c2['ra'] = np.arange(20, dtype=float) + 0.5 / 3600
    c2['dec'] = np.arange(20, dtype=float) - 0.5 / 3600
    c2['lbda'] = np.arange(20, dtype=float) + 3

    match = c1.match3Dline(c2, ['LBDA'], ['lbda'],
                           colc2=('ra', 'dec'),
                           full_output=False)
    assert len(match) == 10
    assert_almost_equal(match['DIST'], 0.705, decimal=2)
    assert_array_equal(match['M_LBDA_1'], True)

    match3d, match2d, unmatch1, unmatch2 = c1.match3Dline(c2, ['LBDA'],
                                                          ['lbda'],
                                                          colc2=('ra', 'dec'))
    assert len(match3d) == 10
    assert len(unmatch2) == 10

    match = c1.match3Dline(c2, ['LBDA'], ['lbda'],
                           colc2=('ra', 'dec'),
                           full_output=False,
                           spectral_window=1)
    assert_array_equal(match['M_LBDA_1'], False)
Example #2
def test_meta():
    c1 = Catalog(idname='ID', raname='RA', decname='DEC')
    c1['ID'] = np.arange(10, dtype=int)
    c1['RA'] = np.arange(10, dtype=float)
    c1['DEC'] = np.arange(10, dtype=float)

    assert c1.meta['idname'] is c1.meta['IDNAME']

    c2 = Table()
    c2['id'] = np.arange(20, dtype=int)
    c2['RA'] = np.arange(20, dtype=float) + 0.5 / 3600
    c2['DEC'] = np.arange(20, dtype=float) - 0.5 / 3600
    c2.meta['idname'] = 'id'
    c2.meta['raname'] = 'RA'

    match, nomatch1, nomatch2 = c1.match(c2, full_output=True)
    assert len(match) == 10
    assert type(match.meta) == type(c1.meta)

    assert match.meta['idname'] == 'ID'
    assert match.meta['idname_1'] == 'ID'
    assert match.meta['idname_2'] == 'id'
    assert match.meta['raname'] == 'RA_1'
    assert match.meta['raname_1'] == 'RA_1'
    assert match.meta['raname_2'] == 'RA_2'

    assert nomatch1.meta['idname'] == 'ID'
    assert nomatch2.meta['idname'] == 'id'
Example #3
def test_match():
    c1 = Catalog()
    c1['RA'] = np.arange(10, dtype=float)
    c1['DEC'] = np.arange(10, dtype=float)

    c2 = Table()
    c2['ra'] = np.arange(20, dtype=float) + 0.5 / 3600
    c2['dec'] = np.arange(20, dtype=float) - 0.5 / 3600

    match = c1.match(c2, colc2=('ra', 'dec'), full_output=False)
    assert len(match) == 10
    assert_almost_equal(match['Distance'], 0.705, decimal=2)

    # create a duplicate match
    c1['RA'][4] = c1['RA'][3] - 0.1 / 3600
    c1['DEC'][4] = c1['DEC'][3] - 0.1 / 3600

    c2['ra'][:5] = np.arange(5, dtype=float) + 0.1 / 3600
    c2['dec'][:5] = np.arange(5, dtype=float) + 0.1 / 3600

    match, nomatch1, nomatch2 = c1.match(c2,
                                         colc2=('ra', 'dec'),
                                         radius=0.5,
                                         full_output=True)
    assert len(match) == 4
    assert len(nomatch1) == 6
    assert len(nomatch2) == 16
    assert type(nomatch2) == type(c2)
Example #4
def test_muselet_full(tmpdir, minicube):
    """test MUSELET"""
    outdir = str(tmpdir)
    print('Working directory:', outdir)

    muselet(minicube.filename,
            write_nbcube=True,
            cleanup=True,
            workdir=outdir,
            n_cpu=2)

    # NB cube produced?
    assert os.path.isfile(
        str(tmpdir.join('NB_' + os.path.basename(minicube.filename))))

    # get catalogs
    cat_lines = Catalog.read(str(tmpdir.join('lines.fit')))
    cat_objects = Catalog.read(str(tmpdir.join('objects.fit')))

    files_lines = glob(str(tmpdir.join('lines/*')))
    files_objects = glob(str(tmpdir.join('objects/*')))

    # check same length as number of sources
    assert len(cat_lines) == len(files_lines)
    assert len(cat_objects) == len(files_objects)

    assert len(cat_lines) == 61
    assert len(cat_objects) == 15
Example #5
def test_muselet_fast(tmpdir, minicube):
    """test MUSELET"""
    outdir = str(tmpdir)
    filename = str(tmpdir.join('cube.fits'))
    cube = minicube[1800:2000, :, :]
    cube.write(filename, savemask='nan')
    print('Working directory:', outdir)
    muselet(filename, write_nbcube=True, cleanup=True, workdir=outdir)

    # NB cube produced?
    assert os.path.isfile(str(tmpdir.join('NB_cube.fits')))

    # get catalogs
    cat_lines = Catalog.read(str(tmpdir.join('lines.fit')))
    cat_objects = Catalog.read(str(tmpdir.join('objects.fit')))

    files_lines = glob(str(tmpdir.join('lines/*')))
    files_objects = glob(str(tmpdir.join('objects/*')))

    # check same length as number of sources
    assert len(cat_lines) == len(files_lines)
    assert len(cat_objects) == len(files_objects)

    assert len(cat_lines) == 34
    assert len(cat_objects) == 12
Example #6
def test_catalog():
    cat = Catalog(rows=[[1, 50., 10., 2., -9999], [2, 40., 20., np.nan, 2]],
                  names=('ID', 'ra', 'dec', 'z', 'flag'),
                  masked=True)
    print(cat)
    assert len(cat) == 2
    assert cat.masked
    assert cat.colnames == ['ID', 'ra', 'dec', 'z', 'flag']
    assert cat['flag'][0] is np.ma.masked
    assert cat['z'][1] is np.ma.masked
Example #7
def test_from_path(source1, source2, tmpdir):
    with pytest.raises(IOError):
        cat = Catalog.from_path('/not/a/valid/path')

    source1.write(str(tmpdir.join('source1.fits')))
    source2.write(str(tmpdir.join('source2.fits')))
    cat = Catalog.from_path(str(tmpdir))
    assert len(cat) == 2
    # 2 additional columns vs from_sources: FILENAME is added by from_path,
    # and SOURCE_V is added by Source.write
    assert len(cat.colnames) == 47

    for name in ('cat.fits', 'cat.csv'):
        filename = str(tmpdir.join(name))
        cat.write(filename)

        c = Catalog.read(filename)
        assert c.colnames == cat.colnames
        assert len(c) == 2
        assert isinstance(c, Catalog)
Example #8
def test_tods9(tmpdir):
    cat = Catalog.read(get_data_file('sdetect', 'cat.txt'), format='ascii')
    regfile = str(tmpdir.join('test.reg'))
    cat.to_ds9_regions(regfile)
    with open(regfile) as f:
        assert f.readlines()[:4] == [
            '# Region file format: DS9 astropy/regions\n',
            'fk5\n',
            'circle(63.356106,10.466166,0.000278)\n',
            'circle(63.355404,10.464703,0.000278)\n',
        ]
Example #9
def test_join_meta():
    c1 = Catalog()
    c1['ID'] = np.arange(10, dtype=int)
    c1['RA'] = np.arange(10, dtype=float)
    c1['DEC'] = np.arange(10, dtype=float)
    c1.meta['idname'] = 'ID'
    c1.meta['raname'] = 'RA'

    c2 = Table()
    c2['ID'] = np.arange(15, dtype=int)
    c2['RA'] = np.arange(15, dtype=float)
    c2['dec'] = np.arange(15, dtype=float)
    c2.meta['idname'] = 'ID'
    c2.meta['raname'] = 'RA'
    c2.meta['decname'] = 'dec'

    join = c1.join(c2, keys=['ID'])  # join on id
    assert len(join) == 10
    assert type(join.meta) == type(c1.meta)

    assert join.meta['idname'] == 'ID'
    assert join.meta['raname'] == 'RA_1'
    assert join.meta['raname_1'] == 'RA_1'
    assert join.meta['raname_2'] == 'RA_2'
Example #10
def test_from_sources(source1, source2, fmt, ncols):
    source1.CUBE_V = '0.1'
    source2.CUBE_V = '0.2'
    source1.UCUSTOM = (1000, 'some custom keyword u.Angstrom')
    source2.UCUSTOM = (2000, 'some custom keyword u.Angstrom')
    source1.FCUSTOM = (1000, 'some custom keyword %.2f')
    source2.FCUSTOM = (2000, 'some custom keyword %.2f')
    source1.UFCUSTOM = (1000.1234, 'some custom keyword u.Angstrom %.2f')
    source2.UFCUSTOM = (2000.1234, 'some custom keyword u.Angstrom %.2f')
    lines1 = source1.lines['LINE'].data.copy()
    lines2 = source2.lines['LINE'].data.copy()
    cat = Catalog.from_sources([source1, source2], fmt=fmt)
    assert len(cat) == 2
    assert len(cat.colnames) == ncols
    assert list(cat['ID']) == [1, 32]
    assert list(cat['CUBE_V']) == ['0.1', '0.2']
    assert_array_equal(source1.lines['LINE'].data, lines1)
    assert_array_equal(source2.lines['LINE'].data, lines2)
Example #11
def test_select(minicube):
    cat = Catalog.read(get_data_file('sdetect', 'cat.txt'), format='ascii')
    im = minicube.mean(axis=0)

    # Note im.shape is (40, 40) and cat has 8 rows all inside the image
    assert len(cat) == 8

    # all sources are in the image
    assert len(cat.select(im.wcs, margin=0)) == 8

    # using a margin removes the sources near the edges
    assert len(cat.select(im.wcs, margin=5)) == 4

    # Create a mask with the bottom and left edges masked
    mask = np.ones(im.shape, dtype=bool)
    mask[5:, 5:] = False

    # using the mask removes the sources on the masked edges
    assert len(cat.select(im.wcs, mask=mask)) == 4
    assert len(cat.select(im.wcs, margin=1, mask=mask)) == 4
Example #12
def test_nearest():
    c1 = Catalog()
    c1['RA'] = np.arange(10, dtype=float)
    c1['DEC'] = np.arange(10, dtype=float)

    res = c1.nearest((5 + 1 / 3600, 5 + 1 / 3600))
    assert_almost_equal(list(res[0]), (5.0, 5.0, 1.41), decimal=2)

    res = c1.nearest((5, 5), ksel=2)
    assert len(res) == 2

    pos = SkyCoord(5, 5, unit='deg', frame='fk5')
    res = c1.nearest(pos, ksel=2)
    assert len(res) == 2

    res = c1.nearest(pos.to_string('hmsdms').split(' '), ksel=10, maxdist=6000)
    assert len(res) == 3
Example #13
    def __init__(self,
                 cube=None,
                 cubeProcessed=None,
                 catalog=None,
                 listSources=None,
                 listID=None,
                 params=None):
        """
        Several Input choices are available:
        * List of MUSE Sources : These sources needs a MUSE_CUBE in their cubes extension.
        * Complete cube with a MUSE catalogue -> all Lyman-alpha emitters will be treated
        * Complete cube, catalogue and listID -> all the source of the ID list will be treated
        An already preprocessed cubeProcessed can also by passed along.
        Param: Cube object *cube*, MUSE datacube (optionnal if listSources is defined)
        Param: Cube object *cubeProcessed*, preprocessed datacube
        Param: String *catalog*, filename of a MUSE catalog
        Param: list of Sources object *listSources* (optionnal)
        Param: list of sources IDs *listID*, list of sources to extract from the cube using the catalog.
        Param: objet Params *params*, parameters for the method, if not defined,
        default parameters are used

        """
        self.params = params if params is not None else parameters.Params()
        self.cube = cube
        self.cubeProcessed = cubeProcessed
        if catalog is not None:
            self.catalog = Catalog.read(catalog)

        if listSources is not None:
            self.listSources = listSources
        else:
            if listID is None:
                # Keep all Lyman-alpha emitters with a defined redshift.
                hdulist = pyfits.open(catalog)
                listID = [row[0] for row in hdulist[1].data
                          if row[1] == 'Lya' and row[4] > 0]
            self.listSources = [self.sourceFromCatalog(k) for k in listID]

        self.listCorrArr = []
        self.listPvalMap = []
        self.preprocessing = None
        self.postprocessing = None
        self.paramsPreProcess = parameters.ParamsPreProcess()
        self.paramsPostProcess = parameters.ParamsPostProcess()
        self.paramsDetection = parameters.ParamsDetection()
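
The docstring above describes three construction modes; a minimal usage sketch follows. The class name Detector is purely illustrative (this excerpt does not show the real name), and cube, src1, src2 are assumed to be an mpdaf Cube and Source objects created elsewhere.

# Hypothetical class name, used here only to illustrate the three modes.
# Mode 1: cube + catalog -> every Lya emitter with a defined redshift
det = Detector(cube=cube, catalog='catalog.fits')
# Mode 2: cube + catalog + explicit list of source IDs
det = Detector(cube=cube, catalog='catalog.fits', listID=[3, 17, 42])
# Mode 3: start from already-extracted MUSE Source objects
det = Detector(listSources=[src1, src2])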
Example #14
def test_origin(caplog, tmpdir):
    """Test the full ORIGIN process."""

    orig = ORIGIN.init(MINICUBE, name='tmp', loglevel='INFO', path=str(tmpdir))
    orig.write()

    origfolder = str(tmpdir.join('tmp'))

    # test that log level is correctly reloaded, then change it
    orig = ORIGIN.load(origfolder)
    assert orig.logger.handlers[0].level == 20
    orig.set_loglevel('DEBUG')
    assert orig.logger.handlers[0].level == 10

    orig.step01_preprocessing()
    assert orig.ima_dct is not None
    assert orig.ima_std is not None
    orig.write()

    orig = ORIGIN.load(origfolder)
    orig.step02_areas(minsize=30, maxsize=60)
    assert orig.param['nbareas'] == 4
    assert list(np.unique(orig.areamap._data)) == [1, 2, 3, 4]
    orig.write()

    orig = ORIGIN.load(origfolder)
    assert orig.param['nbareas'] == 4
    orig.step03_compute_PCA_threshold()
    orig.step04_compute_greedy_PCA()

    # TGLR computing (normalized correlations)
    orig.step05_compute_TGLR(ncpu=1)
    # orig.step05_compute_TGLR(ncpu=1, NbSubcube=2)

    # threshold applied on pvalues
    orig.step06_compute_purity_threshold(purity=0.8)

    # FIXME: threshold is hardcoded for now
    orig.step07_detection(threshold=9.28, segmap=SEGMAP)

    # estimation
    orig.step08_compute_spectra()
    orig.write()

    cat = Catalog.read(str(tmpdir.join('tmp', 'Cat1.fits')))
    subcat = cat[cat['comp'] == 0]
    assert np.all(np.isnan(np.array(subcat['STD'])))
    # Test that the columns mask is correct. To be tested when we switch
    # back to a masked table
    # assert np.all(subcat['T_GLR'].mask == False)
    # assert np.all(subcat['STD'].mask == True)

    # cleaned results
    orig = ORIGIN.load(origfolder, newname='tmp2')
    orig.step09_clean_results()
    orig.write()

    # check that the catalog version was saved
    assert "CAT3_TS" in Catalog.read(
        str(tmpdir.join('tmp2', 'Cat3_lines.fits'))).meta
    assert "CAT3_TS" in Catalog.read(
        str(tmpdir.join('tmp2', 'Cat3_sources.fits'))).meta

    # create masks
    origfolder2 = str(tmpdir.join('tmp2'))
    orig = ORIGIN.load(origfolder2)
    orig.step10_create_masks()
    orig.write()

    # list of source objects
    orig = ORIGIN.load(origfolder2)
    orig.step11_save_sources("0.1")
    orig.step11_save_sources("0.1", n_jobs=2, overwrite=True)

    orig.info()
    with open(orig.logfile) as f:
        log = f.read().splitlines()
        assert '11 Done' in log[-1]

    tbl = orig.timestat(table=True)
    assert len(tbl) == 12
    assert tbl.colnames == ['Step', 'Exec Date', 'Exec Time']

    caplog.clear()
    orig.timestat()
    rec = caplog.records
    assert rec[0].message.startswith('step01_preprocessing executed:')
    assert rec[10].message.startswith('step11_save_sources executed:')
    assert rec[11].message.startswith('*** Total run time:')

    caplog.clear()
    orig.stat()
    assert [rec.message for rec in caplog.records] == [
        'ORIGIN PCA pfa 0.01 Back Purity: 0.80 Threshold: 9.28 '
        'Bright Purity 0.80 Threshold 5.46',
        'Nb of detected lines: 16',
        'Nb of sources Total: 6 Background: 3 Cont: 3',
        'Nb of sources detected in faint (after PCA): 4 in std (before PCA): 2',
    ]

    cat = Catalog.read(str(tmpdir.join('tmp2', 'Cat3_lines.fits')))
    assert len(cat) == 16
    assert max(cat['ID']) == 6

    # test returned sources are valid
    src1 = Source.from_file(
        str(tmpdir.join('tmp2', 'sources', 'source-00001.fits')))
    src2 = Source.from_file(
        str(tmpdir.join('tmp2', 'sources', 'source-00002.fits')))
    # FIXME: check if this test is really useful
    # assert set(sp.shape[0] for sp in src.spectra.values()) == {22, 1100}
    assert {ima.shape for ima in src1.images.values()} == {(25, 25)}
    assert src1.cubes['MUSE_CUBE'].shape == (1100, 25, 25)
    assert "SRC_TS" in src1.header
    assert src1.header["CAT3_TS"] == src2.header["CAT3_TS"]
    assert src1.header["SRC_TS"] == src2.header["SRC_TS"]

    # Cleanup (try to close opened files to avoid warnings)
    for h in orig.logger.handlers:
        h.close()
Example #15
def test_edgedist(minicube):
    cat = Catalog.read(get_data_file('sdetect', 'cat.txt'), format='ascii')
    im = minicube.mean(axis=0)
    ref = [2.29, 0.43, 2.83, 0.19, 2.70, 0.16, 0.11, 1.51]
    assert_almost_equal(cat.edgedist(im.wcs), ref, decimal=2)