Example #1
import pytest

from astropy.utils import metadata
from astropy.utils.metadata import MergeConflictError, enable_merge_strategies, merge


def test_metadata_merging_new_strategy():
    original_merge_strategies = list(metadata.MERGE_STRATEGIES)

    class MergeNumbersAsList(metadata.MergeStrategy):
        """
        Scalar float or int values are joined in a list.
        """
        types = ((int, float), (int, float))

        @classmethod
        def merge(cls, left, right):
            return [left, right]

    class MergeConcatStrings(metadata.MergePlus):
        """
        Scalar string values are concatenated
        """
        types = (str, str)
        enabled = False

    # Normally can't merge two scalar types
    meta1 = {'k1': 1, 'k2': 'a'}
    meta2 = {'k1': 2, 'k2': 'b'}

    # Enable new merge strategy
    with enable_merge_strategies(MergeNumbersAsList, MergeConcatStrings):
        assert MergeNumbersAsList.enabled
        assert MergeConcatStrings.enabled
        out = merge(meta1, meta2, metadata_conflicts='error')
    assert out['k1'] == [1, 2]
    assert out['k2'] == 'ab'
    assert not MergeNumbersAsList.enabled
    assert not MergeConcatStrings.enabled

    # Confirm the default enabled=False behavior
    with pytest.raises(MergeConflictError):
        merge(meta1, meta2, metadata_conflicts='error')

    # Enable all MergeStrategy subclasses
    with enable_merge_strategies(metadata.MergeStrategy):
        assert MergeNumbersAsList.enabled
        assert MergeConcatStrings.enabled
        out = merge(meta1, meta2, metadata_conflicts='error')
    assert out['k1'] == [1, 2]
    assert out['k2'] == 'ab'
    assert not MergeNumbersAsList.enabled
    assert not MergeConcatStrings.enabled

    metadata.MERGE_STRATEGIES = original_merge_strategies
Example #2
from astropy.table import Table, hstack
from astropy.utils.metadata import enable_merge_strategies


def hstack_catalogs(catalogs, output_cat):
    cats = []
    for cat in catalogs:
        cats.append(Table.read(cat))
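    # MergeNumbersAsList is a user-defined MergeStrategy subclass (like the
    # one in Example #1) that merges conflicting scalar metadata into a list.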
    with enable_merge_strategies(MergeNumbersAsList):
        full_cat = hstack(cats)

    print('Writing full catalog to ' + output_cat)
    full_cat.write(output_cat, format='fits', overwrite=True)

    return output_cat
Example #3
    def __call__(self, photons):
        """Split photons among the elements, process each subset, and re-stack."""
        areas = u.Quantity([e.area for e in self.elements])
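        # Assign each photon to an element at random, weighted by the
        # fraction of the total geometric area that each element covers.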
        aperid = np.digitize(np.random.rand(len(photons)), np.cumsum(areas) / self.area)

        # Add ID number to ID col, if requested
        if self.id_col is not None:
            photons[self.id_col] = aperid
        outs = []
        for i, elem in enumerate(self.elements):
            thisphot = photons[aperid == i]
            for p in self.preprocess_steps:
                p(thisphot)
            thisphot = elem(thisphot)
            for p in self.postprocess_steps:
                p(thisphot)
            outs.append(thisphot)
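        # Stack the per-element photon tables back together; MergeIdentical
        # lets identical metadata values merge instead of raising a conflict.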
        with enable_merge_strategies(utils.MergeIdentical):
            photons = table.vstack(outs)

        return photons
Example #4
import numpy as np

from astropy.table import Table, join
from astropy.utils.metadata import enable_merge_strategies

from lsst.daf.persistence import Butler
from lsst.daf.persistence.butlerExceptions import NoResults

# MergeNumbersAsList, MergeListNumbersAsList, prefix_columns and
# trim_long_colnames are helpers defined elsewhere in this module.


def load_patch(butler_or_repo,
               tract,
               patch,
               fields_to_join=('id', ),
               filters={
                   'u': 'u',
                   'g': 'g',
                   'r': 'r',
                   'i': 'i',
                   'z': 'z',
                   'y': 'y'
               },
               trim_colnames_for_fits=False,
               verbose=False):
    """Load patch catalogs.  Return merged catalog across filters.

    butler_or_repo: Butler object or str
        Either a Butler object or a filename to the repo
    tract: int
        Tract in skymap
    patch: str
        Patch in the tract in the skymap
    fields_to_join: iterable of str
        Join the catalogs for each filter on these fields
    filters: iterable of str
        Filter names to load
    trim_colnames_for_fits: bool
        Trim column names to satisfy the FITS standard character limit of <68.

    Returns
    --
    AstroPy Table of patch catalog merged across filters.
    """
    if isinstance(butler_or_repo, str):
        butler = Butler(butler_or_repo)
    else:
        butler = butler_or_repo

    # Build the data ID for this tract and patch.
    tract_patch_data_id = {'tract': tract, 'patch': patch}
    try:
        ref_table = butler.get(datasetType='deepCoadd_ref',
                               dataId=tract_patch_data_id).asAstropy()
    except NoResults as e:
        if verbose:
            print(" ", e)
        return Table()

    isPrimary = ref_table['detect_isPrimary']
    ref_table = ref_table[isPrimary]
    if len(ref_table) == 0:
        if verbose:
            print("  No good isPrimary entries for tract %d, patch %s" %
                  (tract, patch))
        return ref_table

    merge_filter_cats = {}
    for filt in filters:
        this_data = tract_patch_data_id.copy()
        this_data['filter'] = filters[filt]
        try:
            cat = butler.get(datasetType='deepCoadd_forced_src',
                             dataId=this_data).asAstropy()
        except NoResults as e:
            if verbose:
                print(" ", e)
            continue

        # Get the coadd photometric calibration; have it return NaN for
        # negative fluxes instead of raising.
        CoaddCalib = butler.get('deepCoadd_calexp_calib', this_data)
        CoaddCalib.setThrowOnNegativeFlux(False)

        mag, mag_err = CoaddCalib.getMagnitude(cat['base_PsfFlux_flux'],
                                               cat['base_PsfFlux_fluxSigma'])

        cat['mag'] = mag
        cat['mag_err'] = mag_err
        cat['SNR'] = np.abs(
            cat['base_PsfFlux_flux']) / cat['base_PsfFlux_fluxSigma']

        # Apply the isPrimary mask from the reference table; this assumes the
        # forced-source catalog is row-matched to the reference catalog.
        cat = cat[isPrimary]

        merge_filter_cats[filt] = cat

    merged_patch_cat = ref_table
    for filt in filters:
        if filt not in merge_filter_cats:
            continue

        cat = merge_filter_cats[filt]
        if len(cat) < 1:
            continue
        # Rename duplicate columns with prefix of filter
        prefix_columns(cat, filt, fields_to_skip=fields_to_join)
        # Join this filter's catalog onto the merged catalog; the enabled
        # strategies merge conflicting numeric metadata into lists.
        with enable_merge_strategies(MergeNumbersAsList,
                                     MergeListNumbersAsList):
            merged_patch_cat = join(merged_patch_cat, cat, keys=fields_to_join)

    if trim_colnames_for_fits:
        # FITS column names can't be longer than 68 characters
        # Trim here to ensure consistency across any format we write this out to
        trim_long_colnames(merged_patch_cat)

    return merged_patch_cat
Example #5
# Earlier parts of this script (imports and the definitions of Ageom,
# mypointing and redsox) are omitted from this excerpt; the snippet starts
# mid-call, so the Table.read call below is an assumed reconstruction.
spectrum = Table.read(
    '/melkor/d1/guenther/Dropbox/REDSoX File Transfers/raytrace/inputdata/mk421_spec.txt',
    format='ascii.no_header',
    names=['wave', 'fluxperwave'])
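# h*c = 1.2398e-6 keV*mm: convert wavelength (Ang -> mm) to photon energy in
# keV, and flux per unit wavelength to flux per unit energy (Jacobian
# lambda**2 / (h*c) with h*c = 12.398 keV*Ang).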
spectrum['energy'] = 1.2398419292004202e-06 / (spectrum['wave'] * 1e-7)
spectrum['flux'] = (spectrum['fluxperwave'] / 12.398419292004202
                    * spectrum['wave']**2)
spectrum.sort('energy')
# Now limit to the range where I have coefficients for gratings etc.
spectrum = spectrum[(spectrum['wave'] > 25.) & (spectrum['wave'] < 75.)]
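# Integrate the flux density over the energy grid to get the total flux.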
flux = np.sum(spectrum['flux'][1:] * np.diff(spectrum['energy']))
my_sourcepol = PointSource(coords=SkyCoord(30., 30., unit='deg'),
                           energy=spectrum,
                           flux=0.2 * flux,
                           polarization=120.,
                           geomarea=Ageom)
my_sourceunpol = PointSource(coords=SkyCoord(30., 30., unit='deg'),
                             energy=spectrum,
                             flux=0.8 * flux,
                             geomarea=Ageom)
ppol = my_sourcepol.generate_photons(300)
punpol = my_sourceunpol.generate_photons(300)
with enable_merge_strategies(utils.MergeIdentical):
    photons = vstack([ppol, punpol])

photons = mypointing(photons)
len(photons)

photons = redsox.redsox(photons)
photons.write('/melkor/d1/guenther/projects/REDSoX/sims/photons_spectrum.fits',
              overwrite=True)