Example #1
def test_geomarea_projection():
    '''When a ray sees the aperture under an angle the projected aperture size
    is smaller. This is accounted for by reducing the probability of this photon.'''
    photons = Table()
    photons['ra'] = [0., 45., 90., 135., 315.]
    photons['dec'] = np.zeros(5)
    photons['origin_coord'] = SkyCoord(photons['ra'],
                                       photons['dec'],
                                       unit='deg')
    photons['probability'] = np.ones(5)
    photons['time'] = np.arange(5)
    photons['polangle'] = np.zeros(5)

    fp = FixedPointing(coords=SkyCoord(0., 0., unit='deg'))
    photons = fp(photons)
    aper = RectangleAperture()
    p = aper(photons.copy())

    assert np.allclose(p['probability'],
                       [1., 1. / np.sqrt(2), 0, 0, 1. / np.sqrt(2)])

    orientation = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
    aper = RectangleAperture(orientation=orientation)
    p = aper(photons.copy())

    assert np.allclose(p['probability'],
                       [0, 1. / np.sqrt(2), 1., 1. / np.sqrt(2), 0.])
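
# The expected probabilities above are just the projected-area factor
# cos(theta) between each ray and the aperture normal, clipped at zero for
# rays arriving from behind. A quick standalone check (numpy only):
import numpy as np
angles = np.deg2rad([0., 45., 90., 135., 315.])
expected = np.clip(np.cos(angles), 0., None)
# -> [1, 1/sqrt(2), ~0, 0, 1/sqrt(2)], matching the asserts above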
Example #2
def test_pre_post_process():
    '''test pre-processing and post-processing in sequences'''

    def set_energy2(photons):
        photons['energy'] = 2
        return photons

    def set_energy3(photons):
        photons['energy'] = 3
        return photons

    def process(photons):
        photons.meta['mean_en'].append(photons['energy'].mean())

    photons = Table({'energy': [1, 1, 1]})
    photons.meta['mean_en'] = []

    seq_pre = Sequence(elements=[set_energy2, set_energy3], preprocess_steps=[process])
    tpre = seq_pre(photons.copy())
    assert np.all(tpre['energy'] == 3)
    assert np.all(tpre.meta['mean_en'] == [1, 2])

    seq_post = Sequence(elements=[set_energy2, set_energy3], postprocess_steps=[process])
    tpost = seq_post(photons.copy())
    assert np.all(tpost['energy'] == 3)
    assert np.all(tpost.meta['mean_en'] == [2, 3])
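
# A minimal sketch of how such pre-/post-processing hooks could be wired.
# Illustrative only (not the actual Sequence class under test); it reproduces
# the behavior pinned down above: preprocess steps see the table *before*
# each element runs, postprocess steps *after*.
class MiniSequence:
    def __init__(self, elements, preprocess_steps=(), postprocess_steps=()):
        self.elements = elements
        self.preprocess_steps = list(preprocess_steps)
        self.postprocess_steps = list(postprocess_steps)

    def __call__(self, photons):
        for elem in self.elements:
            for step in self.preprocess_steps:
                step(photons)
            photons = elem(photons)
            for step in self.postprocess_steps:
                step(photons)
        return photons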
Example #3
def test_CAT_order_convention():
    dirs = np.array([[-1, 0., 0., 0], [-1, 0.01, -0.01, 0],
                     [-1, 0.01, 0.01, 0], [-1, -0.01, 0.01, 0],
                     [-1, -0.01, -0.01, 0]])
    photons = Table({
        'pos': np.ones((5, 4)),
        'dir': dirs,
        'energy': np.ones(5),
        'polarization': np.ones(5),
        'probability': np.ones(5),
    })
    gp = CATGrating(d=1. / 5000,
                    order_selector=constant_order_factory(5),
                    zoom=2)
    p5 = gp.process_photons(photons.copy())
    gm = CATGrating(d=1. / 5000,
                    order_selector=constant_order_factory(-5),
                    zoom=2)
    m5 = gm.process_photons(photons.copy())
    for g in [gm, gp]:
        assert np.all(
            g.order_sign_convention(h2e(photons['dir'])) == np.array(
                [1, -1, -1, 1, 1]))
    assert np.all(p5['dir'][1:3, 1] > 0)
    assert np.all(p5['dir'][3:, 1] < 0)
    assert np.all(m5['dir'][1:3, 1] < 0)
    assert np.all(m5['dir'][3:, 1] > 0)
Example #4
def make_mtl(targets, zcat=None, trim=True):
    '''
    Adds NUMOBS, PRIORITY, and GRAYLAYER columns to a targets table
    
    Args:
        targets : Table with columns TARGETID, DESI_TARGET

    Optional:
        zcat : redshift catalog table with columns TARGETID, NUMOBS, Z, ZWARN
        trim: if True (default), don't include targets that don't need
            any more observations.  If False, include every input target.
    
    Returns:
        MTL Table with targets columns plus
          * NUMOBS_MORE - number of additional observations requested
          * PRIORITY - target priority (larger number = higher priority)
          * GRAYLAYER - can this be observed during gray time?

    TODO:
        Check if input targets is ever altered (it shouldn't...)
    '''
    n = len(targets)
    targets = Table(targets)
    if zcat is not None:
        ztargets = join(targets, zcat, keys='TARGETID', join_type='outer')
        if ztargets.masked:
            unobs = ztargets['NUMOBS'].mask
            ztargets['NUMOBS'][unobs] = 0
    else:
        ztargets = targets.copy()
        ztargets['NUMOBS'] = np.zeros(n, dtype=np.int32)
        ztargets['Z'] = -1 * np.ones(n, dtype=np.float32)
        ztargets['ZWARN'] = -1 * np.ones(n, dtype=np.int32)
    
    ztargets['NUMOBS_MORE'] = np.maximum(0, calc_numobs(ztargets) - ztargets['NUMOBS'])

    mtl = targets.copy()
    mtl['NUMOBS_MORE'] = ztargets['NUMOBS_MORE']
    mtl['PRIORITY'] = calc_priority(ztargets)

    #- ELGs can be observed during gray time
    graylayer = np.zeros(n, dtype='i4')
    iselg = (mtl['DESI_TARGET'] & desi_mask.ELG) != 0
    graylayer[iselg] = 1
    mtl['GRAYLAYER'] = graylayer

    if trim:
        notdone = mtl['NUMOBS_MORE'] > 0
        mtl = mtl[notdone]
    
    return mtl
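
# Hypothetical invocation with a two-row targets table and a mocked redshift
# catalog; desi_mask, calc_numobs and calc_priority come from the surrounding
# desitarget module, and the column values are purely illustrative.
targets = Table({'TARGETID': [1, 2],
                 'DESI_TARGET': [desi_mask.ELG, desi_mask.LRG]})
zcat = Table({'TARGETID': [1], 'NUMOBS': [1], 'Z': [0.8], 'ZWARN': [0]})
mtl = make_mtl(targets, zcat=zcat)
print(mtl['TARGETID', 'NUMOBS_MORE', 'PRIORITY', 'GRAYLAYER'])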
Example #5
def _create_cigale_in(photom_cat: Table,
                      zmin: float = 0.01,
                      zmax: float = 0.35,
                      n_z: int = 35,
                      cigale_input: str = "cigin_minz_zfrb.fits") -> Table:
    """
    Take the photometry table and
    create a new table with redshifts.
    For each galaxy, create multiple entries
    with different redshifts between zmin and zmax.
    These redshifts will be uniformly spaced.
    Args:
        photom_cat (Table): Photometry catalog
        zmin (float, optional): Minimum redshift for analysis.
        zmax (float, optional): Maximum redshift for analysis.
        n_z (int, optional): Number of redshift grid points.
        cigale_input (str, optional): Name of input file to be produced.
    Returns:
        stacked_photom (Table): A table with multiple groups, one for each galaxy.
            Each entry in a group has the same photometry but different redshift values.
            This way, CIGALE can be run on the same galaxy at multiple redshift guesses
            in one go.
    """
    # Define z values
    z_range = np.linspace(zmin, zmax, n_z)

    photom_cat['redshift'] = z_range[0]  # Set up initial redshift value
    photom_cat['ID'] = photom_cat['ID'].astype(str)  # Convert from int to str
    photom_cat.sort("separation")
    photom_cat['ID'] = [
        ID.zfill(5) + "_{:0.2f}".format(z_range[0]) for ID in photom_cat['ID']
    ]

    # Create new table
    stacked_photom = photom_cat.copy()
    for z in z_range[1:]:
        newphotom = photom_cat.copy()
        newphotom['redshift'] = z
        for entry in newphotom:
            # Swap the zmin suffix for the current z; a hard-coded "_0.01"
            # would break for any zmin other than the default
            entry['ID'] = entry['ID'].replace("_{:0.2f}".format(z_range[0]),
                                              "_{:0.2f}".format(z))
        stacked_photom = vstack([stacked_photom, newphotom])

    # Sort table by ID
    stacked_photom = stacked_photom.group_by('ID')

    # Write to disk
    stacked_photom.write(cigale_input, overwrite=True)
    print("Wrote to disk {:s}".format(cigale_input))
    return stacked_photom
Example #6
def test_zeros_order():
    '''Photons diffracted into order 0 should just pass through'''
    photons = Table({
        'pos': random((10, 4)) * 10 - 5,
        'dir': random((10, 4)) * 10 - 5,
        'energy': random(10),
        'polarization': random(10),
        'probability': np.ones(10),
    })
    # Make sure homogeneous coordinates are valid
    # and infall is normal
    photons['pos'][:, 3] = 1
    photons['dir'][:, 1:] = 0

    p = photons.copy()

    def mock_order(x, y, z):
        return np.zeros_like(x, dtype=int), np.ones_like(x)

    g0 = FlatGrating(d=1. / 500.,
                     order_selector=mock_order,
                     zoom=np.array([1., 5., 5.]))
    p = g0.process_photons(p)
    # Direction unchanged
    d_in = h2e(photons['dir'])
    d_out = h2e(p['dir'])
    # normalize
    d_in = d_in / np.sqrt(np.sum(d_in**2, axis=-1))[:, None]
    d_out = d_out / np.sqrt(np.sum(d_out**2, axis=-1))[:, None]
    assert np.allclose(d_in, d_out)
    # all intersection points in y-z plane
    assert np.allclose(p['pos'][:, 0], 0.)
    # no offset between old and new ray
    assert np.allclose(
        np.cross(h2e(photons['pos']) - h2e(p['pos']), h2e(p['dir'])), 0)
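
# These grating tests rely on h2e() to turn homogeneous 4-vectors back into
# Euclidean 3-vectors. A minimal sketch of such a helper under the usual
# convention (positions carry w == 1, directions w == 0); illustrative, not
# necessarily the library's exact implementation:
import numpy as np

def h2e_sketch(h):
    h = np.asanyarray(h)
    if np.all(h[..., 3] == 0):        # pure direction vectors
        return h[..., :3]
    return h[..., :3] / h[..., 3][..., None]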
Example #7
    def __init__(self, stripeindex=None):
        if stripeindex is None:
            BCfile = MISTFILE_default
        else:
            BCfile = '/n/regal/conroy_lab/pac/MISTFILES/MIST_full_{0}.h5'.format(stripeindex)

        # read in MIST hdf5 table
        MISTh5 = h5py.File(BCfile, 'r')
        # determine the BC datasets
        BCTableList = [x for x in MISTh5.keys() if x[:3] == 'BC_']
        # read in each BC dataset and pull the photometric information
        for BCT in BCTableList:
            BCTABLE = Table(np.array(MISTh5[BCT]))
            if BCT == BCTableList[0]:
                BC = BCTABLE.copy()
            else:
                BCTABLE.remove_columns(['Teff', 'logg', '[Fe/H]', 'Av', 'Rv'])
                BC = hstack([BC, BCTABLE])

        BC_AV0 = BC[BC['Av'] == 0.0]

        self.bands = BC.keys()
        for x in ['Teff', 'logg', '[Fe/H]', 'Av', 'Rv']:
            self.bands.remove(x)

        self.redintr = LinearNDInterpolator(
            (BC['Teff'], BC['logg'], BC['[Fe/H]'], BC['Av']),
            np.stack([BC[bb] for bb in self.bands], axis=1),
            rescale=True
            )
        self.redintr_0 = LinearNDInterpolator(
            (BC_AV0['Teff'], BC_AV0['logg'], BC_AV0['[Fe/H]']),
            np.stack([BC_AV0[bb] for bb in self.bands], axis=1),
            rescale=True
            )
Example #8
def test_zeros_order():
    '''Photons diffracted into order 0 should just pass through'''
    photons = Table({'pos': random((10, 4)) * 10 - 5,
                     'dir': random((10, 4)) * 10 - 5,
                     'energy': random(10),
                     'polarization': random(10),
                     'probability': np.ones(10),
                     })
    # Make sure homogeneous coordinates are valid
    # and infall is normal
    photons['pos'][:, 3] = 1
    photons['dir'][:, 1:] = 0

    p = photons.copy()
    def mock_order(x, y, z):
        return np.zeros_like(x, dtype=int), np.ones_like(x)
    g0 = FlatGrating(d=1./500.,
                     order_selector=mock_order,
                     zoom=np.array([1., 5., 5.]))
    p = g0.process_photons(p)
    # Direction unchanged
    d_in = h2e(photons['dir'])
    d_out = h2e(p['dir'])
    # normalize
    d_in = d_in / np.sqrt(np.sum(d_in**2, axis=-1))[:, None]
    d_out = d_out / np.sqrt(np.sum(d_out**2, axis=-1))[:, None]
    assert np.allclose(d_in, d_out)
    # all intersection points in y-z plane
    assert np.allclose(p['pos'][:, 0], 0.)
    # no offset between old and new ray
    assert np.allclose(np.cross(h2e(photons['pos']) - h2e(p['pos']), h2e(p['dir'])), 0)
Example #9
def main():
    path = "."
    pattern = ""
    if len(sys.argv) == 3:
        path = sys.argv[1]
        pattern = sys.argv[2]
    else:
        pattern = sys.argv[1]

    for file in os.listdir(path):
        if fnmatch.fnmatch(file, pattern):
            data = Table(Table.read(os.path.join(path, file), format="ascii"), masked=True)
            orig_mask = (data['ivar'] == 0)
            data.mask = [orig_mask] * len(data.columns)
            idstr = file[:file.rfind('.')]

            test_data = data.copy()
            test_data['flux'] = minimize(test_data['wavelength'], test_data['flux'], [100, 200, 300], 0, start_ind=split_noisy_app)

            filtered = filter_and_subtract(test_data['flux'], test_data['wavelength'], 201, 24)
            test_data['flux'] = np.ma.min(np.ma.vstack([test_data['flux'], filtered]), axis=0)

            filtered = filter_and_subtract(test_data['flux'], test_data['wavelength'], 161, 18)
            test_data['flux'] = np.ma.min(np.ma.vstack([test_data['flux'], filtered]), axis=0)

            continuum = split_spectrum(test_data['wavelength'], test_data['flux'])
            wo_continuum = data['flux'] - continuum

            save_data(data['wavelength'], wo_continuum, continuum, data['ivar'], orig_mask, idstr)
Example #10
    def __init__(self, stripeindex=None):
        if stripeindex is None:
            BCfile = MISTFILE_default
        else:
            BCfile = '/n/regal/conroy_lab/pac/MISTFILES/MIST_full_{0}.h5'.format(stripeindex)

        # read in MIST hdf5 table
        MISTh5 = h5py.File(BCfile, 'r')
        # determine the BC datasets
        BCTableList = [x for x in MISTh5.keys() if x[:3] == 'BC_']
        # read in each BC dataset and pull the photometric information
        for BCT in BCTableList:
            BCTABLE = Table(np.array(MISTh5[BCT]))
            if BCT == BCTableList[0]:
                BC = BCTABLE.copy()
            else:
                BCTABLE.remove_columns(['Teff', 'logg', '[Fe/H]', 'Av', 'Rv'])
                BC = hstack([BC, BCTABLE])

        BC_AV0 = BC[BC['Av'] == 0.0]

        self.bands = BC.keys()
        for x in ['Teff', 'logg', '[Fe/H]', 'Av', 'Rv']:
            self.bands.remove(x)

        self.redintr = LinearNDInterpolator(
            (BC['Teff'], BC['logg'], BC['[Fe/H]'], BC['Av']),
            np.stack([BC[bb] for bb in self.bands], axis=1),
            rescale=True
            )
        self.redintr_0 = LinearNDInterpolator(
            (BC_AV0['Teff'], BC_AV0['logg'], BC_AV0['[Fe/H]']),
            np.stack([BC_AV0[bb] for bb in self.bands], axis=1),
            rescale=True
            )
Example #11
def remove_duplicates(tab: Table, idcol: str) -> Table:
    """
    In an astropy table if there are duplicate
    entries, remove the duplicates. Generally,
    these will be duplicate objects (i.e. multiple
    observations of same object ID or the same
    entry repeated multiple times from cross-matching.)

    Args:
        tab (Table): A table of entries.
        idcol (str): A column name that has unique ids
            for each table entry.
    Returns:
        unique_tab (Table): A table with only the unique ids.
    """
    unique_tab = tab.copy()
    assert isinstance(unique_tab, Table), "Please provide an astropy table."
    assert isinstance(idcol, str), "Please provide a valid column name."
    assert idcol in tab.colnames, "{} not a column in the given table".format(
        idcol)
    # Sort entries first.
    unique_tab.sort(idcol)
    # Get the duplicates.
    duplicate_ids = np.where(
        unique_tab[1:][idcol] == unique_tab[:-1][idcol])[0] + 1
    unique_tab.remove_rows(duplicate_ids)
    return unique_tab
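
# Hypothetical usage of remove_duplicates on a three-row catalog with one
# repeated ID; two rows remain ('a' and one 'b'), sorted by OBJID.
from astropy.table import Table

cat = Table({'OBJID': ['b', 'a', 'b'], 'mag': [20.1, 19.5, 20.3]})
unique_cat = remove_duplicates(cat, 'OBJID')
print(unique_cat)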
Example #12
def test_zeros_order():
    '''Photons diffracted into order 0 should just pass through'''
    dir = random((10, 4))
    dir[:, 0] = dir[:, 0] * 10  # make sure motion is mostly along x so they hit grating
    photons = Table({'pos': random((10, 4)) - 1,
                     'dir': dir,
                     'energy': random(10),
                     'polarization': polarization_vectors(dir, np.random.rand(10)),
                     'probability': np.ones(10),
                     })
    # Make sure homogeneous coordinates are valid
    # and infall is normal
    photons['pos'][:, 3] = 1
    photons['dir'][:, 1:] = 0

    p = photons.copy()
    g0 = FlatGrating(d=1./500.,
                     order_selector=OrderSelector([0]),
                     zoom=np.array([1., 50., 50.]))
    p = g0(p)
    # Direction unchanged
    d_in = h2e(photons['dir'])
    d_out = h2e(p['dir'])
    # normalize
    d_in = d_in / np.sqrt(np.sum(d_in**2, axis=-1))[:, None]
    d_out = d_out / np.sqrt(np.sum(d_out**2, axis=-1))[:, None]
    assert np.allclose(d_in, d_out)
    # all intersection points in y-z plane
    assert np.allclose(p['pos'][:, 0], 0.)
    # no offset between old and new ray
    assert np.allclose(np.cross(h2e(photons['pos']) - h2e(p['pos']), h2e(p['dir'])), 0)
Example #13
def test_psf_photometry_oneiter(sigma_psf, sources):
    """
    Tests in an image with a group of two overlapped stars and an
    isolated one.
    """

    img_shape = (32, 32)
    # generate image with read-out noise (Gaussian) and
    # background noise (Poisson)
    image = (make_gaussian_sources(img_shape, sources) +
             make_noise_image(img_shape, type='poisson', mean=6.,
                              random_state=1) +
             make_noise_image(img_shape, type='gaussian', mean=0.,
                              stddev=2., random_state=1))
    cp_image = image.copy()

    sigma_clip = SigmaClip(sigma=3.)
    bkgrms = StdBackgroundRMS(sigma_clip)
    std = bkgrms(image)
    phot_objs = make_psf_photometry_objs(std, sigma_psf)

    for phot_proc in phot_objs:
        result_tab = phot_proc(image)
        residual_image = phot_proc.get_residual_image()
        assert_allclose(result_tab['x_fit'], sources['x_mean'], rtol=1e-1)
        assert_allclose(result_tab['y_fit'], sources['y_mean'], rtol=1e-1)
        assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1)
        assert_array_equal(result_tab['id'], sources['id'])
        assert_array_equal(result_tab['group_id'], sources['group_id'])
        assert_allclose(np.mean(residual_image), 0.0, atol=1e1)

        # test fixed photometry
        phot_proc.psf_model.x_0.fixed = True
        phot_proc.psf_model.y_0.fixed = True

        pos = Table(names=['x_0', 'y_0'], data=[sources['x_mean'],
                                                sources['y_mean']])
        cp_pos = pos.copy()

        result_tab = phot_proc(image, pos)
        residual_image = phot_proc.get_residual_image()

        assert_array_equal(result_tab['x_fit'], sources['x_mean'])
        assert_array_equal(result_tab['y_fit'], sources['y_mean'])
        assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1)
        assert_array_equal(result_tab['id'], sources['id'])
        assert_array_equal(result_tab['group_id'], sources['group_id'])
        assert_allclose(np.mean(residual_image), 0.0, atol=1e1)

        # make sure image is not overwritten
        assert_array_equal(cp_image, image)

        # make sure initial guess table is not modified
        assert_array_equal(cp_pos, pos)

        # resets fixed positions
        phot_proc.psf_model.x_0.fixed = False
        phot_proc.psf_model.y_0.fixed = False
Example #14
    def _likelihood_ratio(self, pidx, sidx, d2d):
        """
        Estimates the likelihood ratio for all counterparts and for each
        magnitude band.

        Parameters
        ----------
        pidx : numpy ``ndarray``
            Indexes of the primary sources with counterparts in the
            secondary catalogue.
        sidx : numpy ``ndarray``
            Indexes of the counterparts in the secondary catalogue.
        d2d : numpy ``ndarray``
            Distance between the primary source and the counterpart in the
            secondary catalogue.

        Returns
        -------
        lr : Astropy ``Table``
            Table with the likelihood ratios for each counterpart and each
            available magnitude in the secondary catalogue, trimmed to the
            final output columns.
        lr_all : Astropy ``Table``
            The same table before trimming, with all intermediate columns.
        """
        pcat_idcol = 'SRCID_{}'.format(self.pcat.name)
        scat_idcol = 'SRCID_{}'.format(self.scat.name)
        drcol = 'Separation_{}_{}'.format(self.pcat.name, self.scat.name)

        lr = Table()
        lr[pcat_idcol] = self.pcat.ids[pidx]
        lr[scat_idcol] = self.scat.ids[sidx]
        lr[drcol] = d2d.to(u.arcsec)

        #print(self.pcat.ids[pidx])
        #print(self.scat.ids[sidx])
        #print(d2d.to(u.arcsec))

        # AGE: if the catalogue has no ids it fails with lr['ncat']=2
        lr['ncat'] = [2] * len( lr[pcat_idcol] )
        lr['PEF'] = self._pos_err_function(d2d, pidx, sidx)

        #self._qnterms(lr, self.scat.mags[sidx])
        self._lrND(lr, self.scat.mags[sidx])

        # For estimating the reliability, the table has to be grouped.
        # Note that this changes the order of the rows!!!
        lr = lr.group_by(pcat_idcol)

        self._reliabilityND(lr)

        self._p_any_bestND(lr)
        lr_all = lr.copy()

        final_columns = [pcat_idcol, scat_idcol, drcol, 'ncat',
                         'LR_BEST', 'REL_BEST', 'LR_BEST_MAG',
                         'prob_has_match', 'prob_this_match',
                         ]
        lr.keep_columns(final_columns)

        return lr, lr_all
Example #15
    def stats_rndmatch(self,
                       match,
                       ntest=1,
                       ncutoff=101,
                       mincutoff=0.0,
                       maxcutoff=10.0,
                       plot_to_file=None,
                       **kwargs):
        """
        Calculates match statistics (completeness and reliability), using a
        random match, for a range of LR thresholds. This can be used later to
        select the optimal threshold.
        """
        fstats = None
        for _ in range(ntest):
            match_rnd = self._match_rndcat(**kwargs)

            # TODO: LR output should be improved to simplify these selection masks
            mask = np.logical_and(match['ncat'] == 2, match['match_flag'] == 1)
            p_any0 = match[self._cutoff_column][mask]

            # Add sources with no matches
            size = len(np.where(match['ncat'] == 1)[0]) - len(p_any0)
            p_any0 = np.concatenate((np.array(p_any0), np.zeros(size)))

            mask = np.logical_and(match_rnd['ncat'] == 2,
                                  match_rnd['match_flag'] == 1)
            p_any0_offset = match_rnd[self._cutoff_column][mask]

            # Add sources with no matches
            size = len(
                np.where(match_rnd['ncat'] == 1)[0]) - len(p_any0_offset)
            p_any0_offset = np.concatenate(
                (np.array(p_any0_offset), np.zeros(size)))

            cutoffs = np.linspace(mincutoff, maxcutoff, num=ncutoff)

            stats = Table()
            stats['cutoff'] = cutoffs
            stats['completeness'] = [(p_any0 > c).mean() for c in cutoffs]
            stats['error_rate'] = [(p_any0_offset > c).mean() for c in cutoffs]
            stats['reliability'] = 1 - stats['error_rate']
            stats['CR'] = stats['completeness'] + stats['reliability']

            if fstats is None:
                fstats = stats.copy()
            else:
                for col in fstats.colnames[1:]:
                    fstats[col] += stats[col]

        for col in fstats.colnames[1:]:
            fstats[col] = fstats[col] / ntest

        if plot_to_file is not None:
            self._plot_stats(fstats, plot_to_file)

        return fstats
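
# The returned table makes it easy to pick an operating threshold afterwards,
# e.g. the cutoff maximizing completeness + reliability. Hypothetical usage:
# 'xm' is an existing matcher instance and 'match' its match table.
fstats = xm.stats_rndmatch(match, ntest=10)
best_cutoff = fstats['cutoff'][np.argmax(fstats['CR'])]
print('optimal LR threshold:', best_cutoff)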
Example #16
def run_photometry(
    image: np.ndarray,
    input_table: Table,
    filename: str = '?',
    config=Config.instance()) -> Union[PhotometryResult, str]:
    """
    apply EPSF fitting photometry to a test image

    :param input_table:
    :param image:
    :param filename:
    :param config: instance of Config containing all processing parameters
    :return: PhotometryResult, (image, input_table, result_table, epsf, star_guesses)
    """

    print(f'starting job on image {filename} with {config}')

    finder = get_finder(image, config)

    # TODO should this also be done using the catalogue positions?
    # TODO can we somehow sort the stars according to usefulness?
    star_guesses = make_stars_guess(
        image, finder, cutout_size=config.cutout_size)[:config.max_epsf_stars]

    if len(star_guesses) < config.max_epsf_stars:
        print('Warning: found fewer stars than config.max_epsf_stars')

    epsf = make_epsf_fit(star_guesses,
                         iters=config.epsfbuilder_iters,
                         oversampling=config.oversampling,
                         smoothing_kernel=config.smoothing,
                         epsf_guess=config.epsf_guess)

    if config.use_catalogue_positions:
        guess_table = input_table.copy()
        guess_table = cut_edges(guess_table, config.cutout_size,
                                image.shape[0])
        guess_table.rename_columns(['x', 'y'], ['x_0', 'y_0'])
        guess_table['x_0'] += np.random.uniform(-0.2,
                                                +0.2,
                                                size=len(guess_table['x_0']))
        guess_table['y_0'] += np.random.uniform(-0.2,
                                                +0.2,
                                                size=len(guess_table['y_0']))
    else:
        guess_table = None

    result_table = do_photometry_epsf(image,
                                      epsf,
                                      finder,
                                      initial_guess=guess_table,
                                      config=config)
    result_table['m'] = flux_to_magnitude(result_table['flux_fit'])

    return PhotometryResult(image, input_table, result_table, epsf,
                            star_guesses, config, filename)
Example #17
def test_psf_photometry_oneiter(sigma_psf, sources):
    """
    Tests in an image with a group of two overlapped stars and an
    isolated one.
    """

    img_shape = (32, 32)
    # generate image with read-out noise (Gaussian) and
    # background noise (Poisson)
    image = (make_gaussian_sources(img_shape, sources) + make_noise_image(
        img_shape, type='poisson', mean=6., random_state=1) + make_noise_image(
            img_shape, type='gaussian', mean=0., stddev=2., random_state=1))
    cp_image = image.copy()

    sigma_clip = SigmaClip(sigma=3.)
    bkgrms = StdBackgroundRMS(sigma_clip)
    std = bkgrms(image)
    phot_objs = make_psf_photometry_objs(std, sigma_psf)

    for phot_proc in phot_objs:
        result_tab = phot_proc(image)
        residual_image = phot_proc.get_residual_image()
        assert_allclose(result_tab['x_fit'], sources['x_mean'], rtol=1e-1)
        assert_allclose(result_tab['y_fit'], sources['y_mean'], rtol=1e-1)
        assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1)
        assert_array_equal(result_tab['id'], sources['id'])
        assert_array_equal(result_tab['group_id'], sources['group_id'])
        assert_allclose(np.mean(residual_image), 0.0, atol=1e1)

        # test fixed photometry
        phot_proc.psf_model.x_0.fixed = True
        phot_proc.psf_model.y_0.fixed = True

        pos = Table(names=['x_0', 'y_0'],
                    data=[sources['x_mean'], sources['y_mean']])
        cp_pos = pos.copy()

        result_tab = phot_proc(image, pos)
        residual_image = phot_proc.get_residual_image()

        assert_array_equal(result_tab['x_fit'], sources['x_mean'])
        assert_array_equal(result_tab['y_fit'], sources['y_mean'])
        assert_allclose(result_tab['flux_fit'], sources['flux'], rtol=1e-1)
        assert_array_equal(result_tab['id'], sources['id'])
        assert_array_equal(result_tab['group_id'], sources['group_id'])
        assert_allclose(np.mean(residual_image), 0.0, atol=1e1)

        # make sure image is not overwritten
        assert_array_equal(cp_image, image)

        # make sure initial guess table is not modified
        assert_array_equal(cp_pos, pos)

        # resets fixed positions
        phot_proc.psf_model.x_0.fixed = False
        phot_proc.psf_model.y_0.fixed = False
Example #18
def test_order_convention():
    dirs = np.zeros((3, 4))
    dirs[1, 2] = 0.01
    dirs[2, 2] = -0.01
    dirs[:, 0] = -1
    photons = Table({'pos': np.ones((3, 4)),
                     'dir': dirs,
                     'energy': np.ones(3),
                     'polarization': polarization_vectors(dirs, np.ones(3)),
                     'probability': np.ones(3),
                     })
    gp = FlatGrating(d=1./500, order_selector=OrderSelector([1]), zoom=2)
    p1 = gp(photons.copy())
    gm = FlatGrating(d=1./500, order_selector=OrderSelector([-1]), zoom=2)
    m1 = gm(photons.copy())
    assert np.all(p1['order'] == 1)
    assert np.all(m1['order'] == -1)
    # intersection point with grating cannot depend on order
    assert np.all(p1['pos'].data == m1['pos'].data)
Example #19
def test_order_convention():
    dirs = np.zeros((3, 4))
    dirs[1, 2] = 0.01
    dirs[2, 2] = -0.01
    dirs[:, 0] = -1
    photons = Table({
        'pos': np.ones((3, 4)),
        'dir': dirs,
        'energy': np.ones(3),
        'polarization': polarization_vectors(dirs, np.ones(3)),
        'probability': np.ones(3),
    })
    gp = FlatGrating(d=1. / 500, order_selector=OrderSelector([1]), zoom=2)
    p1 = gp(photons.copy())
    gm = FlatGrating(d=1. / 500, order_selector=OrderSelector([-1]), zoom=2)
    m1 = gm(photons.copy())
    assert np.all(p1['order'] == 1)
    assert np.all(m1['order'] == -1)
    # intersection point with grating cannot depend on order
    assert np.all(p1['pos'].data == m1['pos'].data)
Example #20
def test_CAT_order_convention():
    dirs = np.zeros((3, 4))
    dirs[:, 0] = -1.
    dirs[1, 2] = 0.01
    dirs[2, 2] = -0.01
    photons = Table({'pos': np.ones((3, 4)),
                     'dir': dirs,
                     'energy': np.ones(3),
                     'polarization': np.ones(3),
                     'probability': np.ones(3),
                     })
    gp = CATGrating(d=1./5000, order_selector=constant_order_factory(5), zoom=2)
    p5 = gp.process_photons(photons.copy())
    gm = CATGrating(d=1./5000, order_selector=constant_order_factory(-5), zoom=2)
    m5 = gm.process_photons(photons.copy())
    for g in [gm, gp]:
        assert np.all(g.order_sign_convention(h2e(photons['dir'])) == np.array([1, 1, -1]))
    assert p5['dir'][1, 1] > 0
    assert p5['dir'][2, 1] < 0
    assert m5['dir'][1, 1] < 0
    assert m5['dir'][2, 1] > 0
Example #21
def test_CAT_order_convention():
    dirs = np.array([[-1, 0., 0., 0],
                     [-1, 0.01, -0.01, 0],
                     [-1, 0.01, 0.01, 0],
                     [-1, -0.01, 0.01, 0],
                     [-1, -0.01, -0.01, 0]])
    photons = Table({'pos': np.ones((5, 4)),
                     'dir': dirs,
                     'energy': np.ones(5),
                     'polarization': np.ones(5),
                     'probability': np.ones(5),
                     })
    gp = CATGrating(d=1./5000, order_selector=constant_order_factory(5), zoom=2)
    p5 = gp.process_photons(photons.copy())
    gm = CATGrating(d=1./5000, order_selector=constant_order_factory(-5), zoom=2)
    m5 = gm.process_photons(photons.copy())
    for g in [gm, gp]:
        assert np.all(g.order_sign_convention(h2e(photons['dir'])) == np.array([1, -1, -1, 1, 1]))
    assert np.all(p5['dir'][1:3, 1] > 0)
    assert np.all(p5['dir'][3:, 1] < 0)
    assert np.all(m5['dir'][1:3, 1] < 0)
    assert np.all(m5['dir'][3:, 1] > 0)
Example #22
def test_fit_drop_empty(mock_fits_wcs):
    t0 = Table([[], []], names=('x', 'y'))
    t1 = Table([[1], [3]], names=('x', 'y'))
    wcscats = [
        FITSWCS(
            copy.deepcopy(mock_fits_wcs),
            meta={'catalog': t0.copy(), 'group_id': 1}
        ),
        FITSWCS(
            copy.deepcopy(mock_fits_wcs),
            meta={'catalog': t1.copy(), 'group_id': 2}
        ),
        FITSWCS(
            copy.deepcopy(mock_fits_wcs),
            meta={'catalog': t0.copy(), 'group_id': 2}
        ),
        FITSWCS(
            copy.deepcopy(mock_fits_wcs),
            meta={'catalog': t0.copy(), 'group_id': 3}
        ),
        FITSWCS(
            copy.deepcopy(mock_fits_wcs),
            meta={'catalog': t0.copy(), 'group_id': 3}
        ),
        FITSWCS(
            copy.deepcopy(mock_fits_wcs),
            meta={'catalog': t1.copy(), 'group_id': 4}
        ),
        FITSWCS(
            copy.deepcopy(mock_fits_wcs),
            meta={'catalog': t1.copy(), 'group_id': 4}
        )
    ]

    align_wcs(wcscats, fitgeom='shift')

    status = [w.meta.get('fit_info')['status'] for w in wcscats]

    assert status[0] == 'FAILED: empty source catalog'
    assert status[3] == 'FAILED: empty source catalog'
    assert status[4] == 'FAILED: empty source catalog'

    if status[1] == 'SUCCESS':
        assert status[2] == 'SUCCESS'
        assert status[5] == 'REFERENCE'
        assert status[6] == 'REFERENCE'

    elif status[1] == 'REFERENCE':
        assert status[2] == 'REFERENCE'
        assert status[5] == 'SUCCESS'
        assert status[6] == 'SUCCESS'

    else:
        assert False
Example #23
def test_geomarea_projection():
    '''When a ray sees the aperture under an angle the projected aperture size
    is smaller. This is accounted for by reducing the probability of this photon.'''
    photons = Table()
    photons['ra'] = [0., 45., 90., 135., 315.]
    photons['dec'] = np.zeros(5)
    photons['origin_coord'] = SkyCoord(photons['ra'], photons['dec'], unit='deg')
    photons['probability'] = np.ones(5)
    photons['time'] = np.arange(5)
    photons['polangle'] = np.zeros(5)

    fp = FixedPointing(coords=SkyCoord(0., 0., unit='deg'))
    photons = fp(photons)
    aper = RectangleAperture()
    p = aper(photons.copy())

    assert np.allclose(p['probability'], [1., 1./np.sqrt(2), 0, 0, 1./np.sqrt(2)])

    orientation = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])
    aper = RectangleAperture(orientation=orientation)
    p = aper(photons.copy())

    assert np.allclose(p['probability'], [0., 1./np.sqrt(2), 1., 1./np.sqrt(2), 0.])
Example #24
def test_CAT_order_convention():
    dirs = np.array([[-1, 0., 0., 0], [-1, 0.01, -0.01, 0],
                     [-1, 0.01, 0.01, 0], [-1, -0.01, 0.01, 0],
                     [-1, -0.01, -0.01, 0]])
    photons = Table({
        'pos': np.ones((5, 4)),
        'dir': dirs,
        'energy': np.ones(5),
        'polarization': polarization_vectors(dirs, np.arange(5)),
        'probability': np.ones(5),
    })
    gp = CATGrating(d=1. / 5000, order_selector=OrderSelector([5]), zoom=2)
    p5 = gp(photons.copy())
    gm = CATGrating(d=1. / 5000, order_selector=OrderSelector([-5]), zoom=2)
    m5 = gm(photons.copy())
    for g in [gm, gp]:
        e_groove, e_perp, n = g.e_groove_coos(np.zeros((5, 2)))
        signs = g.order_sign_convention(photons['dir'], e_perp)
        assert np.all(signs == np.array([1, 1, 1, -1, -1]))
    assert np.all(p5['dir'][1:3, 1] > 0)
    assert np.all(p5['dir'][3:, 1] < 0)
    assert np.all(m5['dir'][1:3, 1] < 0)
    assert np.all(m5['dir'][3:, 1] > 0)
Example #25
def test_CAT_order_convention():
    dirs = np.array([[-1, 0., 0., 0],
                     [-1, 0.01, -0.01, 0],
                     [-1, 0.01, 0.01, 0],
                     [-1, -0.01, 0.01, 0],
                     [-1, -0.01, -0.01, 0]])
    photons = Table({'pos': np.ones((5, 4)),
                     'dir': dirs,
                     'energy': np.ones(5),
                     'polarization': polarization_vectors(dirs, np.arange(5)),
                     'probability': np.ones(5),
                     })
    gp = CATGrating(d=1./5000, order_selector=OrderSelector([5]), zoom=2)
    p5 = gp(photons.copy())
    gm = CATGrating(d=1./5000, order_selector=OrderSelector([-5]), zoom=2)
    m5 = gm(photons.copy())
    for g in [gm, gp]:
        e_groove, e_perp, n = g.e_groove_coos(np.zeros((5, 2)))
        signs = g.order_sign_convention(photons['dir'], e_perp)
        assert np.all(signs == np.array([1, 1, 1, -1, -1]))
    assert np.all(p5['dir'][1:3, 1] > 0)
    assert np.all(p5['dir'][3:, 1] < 0)
    assert np.all(m5['dir'][1:3, 1] < 0)
    assert np.all(m5['dir'][3:, 1] > 0)
Example #26
    def generate_evolution_tables(self):
        for pairnum, filnums in self.pairings.items():
            if pairnum == 0:
                continue

            current_calibs = Table(self.coarse_calibration_coefs[pairnum].copy())
            past_calibs = Table(self.coarse_calibration_coefs[pairnum - 1].copy())

            for column in current_calibs.colnames:
                current_calibs[column] = current_calibs[column] - past_calibs[column]

            self.evolution_in_coarse_coefs[pairnum] = current_calibs.copy()
Example #27
    def stats_rndmatch(self,
                       match,
                       ntest=1,
                       ncutoff=101,
                       mincutoff=0.0,
                       maxcutoff=1.0,
                       plot_to_file=None,
                       **kwargs):
        """
        Calculates match statistics (completeness and reliability), using a
        random match, for a range of thresholds. This can be used later to
        select the optimal threshold.
        """
        # TODO: estimate uncertainties in the final statistics using e.g. bootstrapping
        fstats = None
        for _ in range(ntest):
            match_rnd = self._match_rndcat(**kwargs)

            mask = match['match_flag'] == 1
            p_any0 = match[self._cutoff_column][mask]

            mask = match_rnd['match_flag'] == 1
            p_any0_offset = match_rnd[self._cutoff_column][mask]

            cutoffs = np.linspace(mincutoff, maxcutoff, num=ncutoff)

            stats = Table()
            stats['cutoff'] = cutoffs
            stats['completeness'] = [(p_any0 > c).mean() for c in cutoffs]
            stats['error_rate'] = [(p_any0_offset > c).mean() for c in cutoffs]
            stats['reliability'] = 1 - stats['error_rate']
            stats['CR'] = stats['completeness'] + stats['reliability']

            if fstats is None:
                fstats = stats.copy()
            else:
                for col in fstats.colnames[1:]:
                    fstats[col] += stats[col]

        for col in fstats.colnames[1:]:
            fstats[col] = fstats[col] / ntest

        if plot_to_file is not None:
            self._plot_stats(fstats, plot_to_file)

        return fstats
Example #28
def generate_times_line_emission(spectra: Table,
                                 spectra_times: Table,
                                 verbose: bool = False):
    """
    Given a finished timeseries of spectra, produce the total line emission at each observation

    Arguments:
        spectra (Table): The time-series of spectra
        spectra_times (Table): The times at which the spectra are taken (i.e. the fake observations)
        verbose (bool): Whether or not to report on the total line variation

    Returns:
        Table: A table with the line emission for each timestep
    """
    line_times = spectra_times.copy(copy_data=True)
    line_times['time'] = line_times['time'].quantity.to(u.s)
    line_times['line'] = np.zeros(len(line_times))

    # Integrated flux error dL = SQRT(dBin1^2 + dBin2^2 + ...);
    # for equal per-bin errors, dL = SQRT(N_Bins * dBin^2) = SQRT(N_Bins) * dBin
    line_times['line_error'] = np.zeros(len(line_times))
    line_times['line_error'] = np.sqrt(len(spectra)) * spectra['error'][0]

    # For each spectrum in the output, sum the total emission
    # This obviously only works for line spectra!
    for step in range(0, len(line_times)):
        line_times['line'][step] = np.sum(spectra[spectra.colnames[5 + step]])

    if verbose:
        print("Variation is: {}".format(
            np.amax(line_times['line']) - np.amin(line_times['line'])))

    #
    # spec_max = np.amax(line_times['line'])
    # spec_min = np.amin(line_times['line'])

    # error = np.array(line_times['line_error']) / (spec_max - spec_min)
    # error = (error * 9)
    # value = (np.array(line_times['line']) - spec_min) / (spec_max - spec_min)
    # value = (value * 9) + 1
    #
    # line_times['line_error'] = error
    # line_times['line'] = value

    return line_times
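
# Quick numeric check of the error comment above: for equal per-bin errors,
# quadrature summation reduces to sqrt(N_Bins) * dBin.
import numpy as np

dbin, nbins = 0.05, 100
assert np.isclose(np.sqrt(np.sum(np.full(nbins, dbin) ** 2)),
                  np.sqrt(nbins) * dbin)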
Example #29
def test_jitter():
    '''test size and randomness of jitter'''
    n = 100000
    ra = np.random.rand(n) * 2 * np.pi
    # Note that my rays are not evenly distributed on the sky.
    # No need to be extra fancy here, it's probably even better for
    # a test to have more scrutiny around the poles.
    dec = (np.random.rand(n) * 2. - 1.) / 2. * np.pi
    time = np.arange(n)
    pol = np.ones_like(ra)
    prob = np.ones_like(ra)
    photons = Table([ra, dec, time, pol, prob],
                    names=['ra', 'dec', 'time', 'polangle', 'probability'])
    fixed = FixedPointing(coords=SkyCoord(25., -10., unit='deg'))
    jittered = JitterPointing(coords=SkyCoord(25., -10., unit='deg'),
                              jitter=1. * u.arcsec)
    p_fixed = fixed(photons.copy())
    p_jitter = jittered(photons)

    assert np.allclose(np.linalg.norm(p_fixed['dir'], axis=1), 1.)
    assert np.allclose(np.linalg.norm(p_jitter['dir'], axis=1), 1.)

    prod = np.sum(p_fixed['dir'] * p_jitter['dir'], axis=1)
    # sum can give values > 1 due to rounding errors
    # That would make arccos fail, so catch those here
    ind = prod > 1.
    if ind.sum() > 0:
        prod[ind] = 1.

    alpha = np.arccos(prod)
    # in this formula alpha will always be the abs(angle).
    # Flip some signs to recover input normal distribution.
    alpha *= np.sign(np.random.rand(n) - 0.5)
    # center?
    assert np.abs(np.mean(alpha)) * 3600. < 0.01
    # Is right size?
    assert np.std(np.rad2deg(alpha)) > (0.9 / 3600.)
    assert np.std(np.rad2deg(alpha)) < (1.1 / 3600.)
    # Does it affect y and z independently?
    coeff, p = pearsonr(p_fixed['dir'][:, 1] - p_jitter['dir'][:, 1],
                        p_fixed['dir'][:, 2] - p_jitter['dir'][:, 2])
    assert abs(coeff) < 0.01
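
# The rounding guard above can be written more compactly with np.clip, which
# also catches products slightly below -1 (equivalent for this test):
#     prod = np.clip(prod, -1., 1.)
#     alpha = np.arccos(prod)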
Example #30
def test_jitter():
    '''test size and randomness of jitter'''
    n = 100000
    ra = np.random.rand(n) * 2 * np.pi
    # Note that my rays are not evenly distributed on the sky.
    # No need to be extra fancy here, it's probably even better for
    # a test to have more scrutiny around the poles.
    dec = (np.random.rand(n) * 2. - 1.) / 2. * np.pi
    time = np.arange(n)
    pol = np.ones_like(ra)
    prob = np.ones_like(ra)
    photons = Table([ra, dec, time, pol, prob],
                    names=['ra', 'dec', 'time', 'polangle', 'probability'])
    fixed = FixedPointing(coords=SkyCoord(25., -10., unit='deg'))
    jittered = JitterPointing(coords=SkyCoord(25., -10., unit='deg'),
                              jitter=1. * u.arcsec)
    p_fixed = fixed(photons.copy())
    p_jitter = jittered(photons)

    assert np.allclose(np.linalg.norm(p_fixed['dir'], axis=1), 1.)
    assert np.allclose(np.linalg.norm(p_jitter['dir'], axis=1), 1.)

    prod = np.sum(p_fixed['dir'] * p_jitter['dir'], axis=1)
    # sum can give values > 1 due to rounding errors
    # That would make arccos fail, so catch those here
    ind = prod > 1.
    if ind.sum() > 0:
        prod[ind] = 1.

    alpha = np.arccos(prod)
    # in this formula alpha will always be the abs(angle).
    # Flip some signs to recover input normal distribution.
    alpha *= np.sign(np.random.rand(n) - 0.5)
    # center?
    assert np.abs(np.mean(alpha)) * 3600. < 0.01
    # Is right size?
    assert np.std(np.rad2deg(alpha)) > (0.9 / 3600.)
    assert np.std(np.rad2deg(alpha)) < (1.1 / 3600.)
    # Does it affect y and z independently?
    coeff, p = pearsonr(p_fixed['dir'][:, 1] - p_jitter['dir'][:, 1],
                        p_fixed['dir'][:, 2] - p_jitter['dir'][:, 2])
    assert abs(coeff) < 0.01
Example #31
def apply_spectra_error(spectra: Table):
    """
    Given a timeseries of spectra with an 'error' column, creates a copy and
    applies random normally-distributed errors to the values at each timestep.

    Arguments:
        spectra (Table):

    Returns:
        Table: A copy of the input spectra with the errors applied
    """
    # Now we have the final spectra: work on a copy so the clean input
    # spectra are left untouched, then add the experimental errors to it
    noisy_copy = spectra.copy(copy_data=True)

    for column in noisy_copy.colnames[5:]:
        # For each spectrum
        if 'value' in column:
            for j in range(0, len(noisy_copy)):
                # For each wavelength bin in each spectrum, add a random error
                noisy_copy[column][j] += normal(scale=noisy_copy['error'][j])
    return noisy_copy
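
# Hypothetical usage, assuming the layout the function expects: five leading
# metadata columns (including 'error'), then per-timestep 'value*' columns.
from astropy.table import Table
import numpy as np

spectra = Table({'wave': np.linspace(4000., 7000., 4),
                 'min': np.zeros(4), 'max': np.zeros(4), 'mean': np.zeros(4),
                 'error': np.full(4, 0.1),
                 'value_0': np.ones(4), 'value_1': np.ones(4)})
noisy = apply_spectra_error(spectra)   # the input table is left untouched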
Example #32
def match_observation_to_source(reference_catalog: Table, photometry_result: Table) \
        -> Table:
    # TODO is this really necessary? there seems to be x_0, y_0 in results,
    #  can use this in cases where the guess is known
    """
    Match the closest points in a photometry catalogue to the input catalogue
    :param reference_catalog: Table containing input positions in 'x' and 'y' columns and magnitudes in 'm'
    :param photometry_result: Table containing measured positions in 'x_fit' and 'y_fit' columns
    :return: photometry_result updated with 'x_orig', 'y_orig', 'm_orig', 'x_offset', 'y_offset'
        and 'offset' (Euclidean distance) columns
    """
    from scipy.spatial import cKDTree

    x_y_pixel = np.array((reference_catalog['x'], reference_catalog['y'], reference_catalog['m'])).T
    # noinspection PyArgumentList
    lookup_tree = cKDTree(x_y_pixel[:, :2])  # only feed x and y to the lookup tree

    photometry_result = photometry_result.copy()
    photometry_result['x_orig'] = np.nan
    photometry_result['y_orig'] = np.nan
    photometry_result['m_orig'] = np.nan
    photometry_result['offset'] = np.nan

    seen_indices = set()
    for row in photometry_result:

        # noinspection PyUnresolvedReferences
        dist, index = lookup_tree.query((row['x_fit'], row['y_fit']))
        # if index in seen_indices:
        #     print('Warning: multiple match for source')  # TODO make this message more useful/use warning module
        seen_indices.add(index)
        row['x_orig'] = x_y_pixel[index, 0]
        row['y_orig'] = x_y_pixel[index, 1]
        row['m_orig'] = x_y_pixel[index, 2]

    photometry_result['x_offset'] = photometry_result['x_fit'] - photometry_result['x_orig']
    photometry_result['y_offset'] = photometry_result['y_fit'] - photometry_result['y_orig']
    photometry_result['offset'] = np.sqrt(photometry_result['x_offset']**2 + photometry_result['y_offset']**2)

    return photometry_result
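
# Hypothetical usage: match three fitted positions back to a two-source
# reference catalog ('x', 'y', 'm' required in the reference, 'x_fit' and
# 'y_fit' in the photometry result).
from astropy.table import Table

reference = Table({'x': [10., 20.], 'y': [10., 20.], 'm': [18., 19.]})
measured = Table({'x_fit': [10.2, 19.9, 10.1], 'y_fit': [9.8, 20.1, 10.0]})
matched = match_observation_to_source(reference, measured)
print(matched['x_orig', 'y_orig', 'offset'])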
Example #33
def test_zeros_order():
    '''Photons diffracted into order 0 should just pass through'''
    dir = random((10, 4))
    dir[:, 0] = dir[:, 0] * 10  # make sure motion is mostly along x so they hit grating
    photons = Table({
        'pos': random((10, 4)) - 1,
        'dir': dir,
        'energy': random(10),
        'polarization': polarization_vectors(dir, np.random.rand(10)),
        'probability': np.ones(10),
    })
    # Make sure homogeneous coordinates are valid
    # and infall is normal
    photons['pos'][:, 3] = 1
    photons['dir'][:, 1:] = 0

    p = photons.copy()
    g0 = FlatGrating(d=1. / 500.,
                     order_selector=OrderSelector([0]),
                     zoom=np.array([1., 50., 50.]))
    p = g0(p)
    # Direction unchanged
    d_in = h2e(photons['dir'])
    d_out = h2e(p['dir'])
    # normalize
    d_in = d_in / np.sqrt(np.sum(d_in**2, axis=-1))[:, None]
    d_out = d_out / np.sqrt(np.sum(d_out**2, axis=-1))[:, None]
    assert np.allclose(d_in, d_out)
    # all intersection points in y-z plane
    assert np.allclose(p['pos'][:, 0], 0.)
    # no offset between old and new ray
    assert np.allclose(
        np.cross(h2e(photons['pos']) - h2e(p['pos']), h2e(p['dir'])), 0)
Example #34
def make_mtl(targets, zcat=None, trim=False):
    """Adds NUMOBS, PRIORITY, and GRAYLAYER columns to a targets table.

    Parameters
    ----------
    targets : :class:`~astropy.table.Table`
        A table with columns ``TARGETID``, ``DESI_TARGET``.
    zcat : :class:`~astropy.table.Table`, optional
        Redshift catalog table with columns ``TARGETID``, ``NUMOBS``, ``Z``,
        ``ZWARN``.
    trim : :class:`bool`, optional
        If ``True``, don't include targets that don't need
        any more observations.  If ``False`` (default), include every input target.

    Returns
    -------
    :class:`~astropy.table.Table`
        MTL Table with targets columns plus

        * NUMOBS_MORE - number of additional observations requested
        * PRIORITY - target priority (larger number = higher priority)
        * GRAYLAYER - can this be observed during gray time?

    Notes
    -----
        TODO: Check if input targets is ever altered (it shouldn't...).
    """
    n = len(targets)
    targets = Table(targets)
    if zcat is not None:
        ztargets = join(targets, zcat['TARGETID', 'NUMOBS', 'Z', 'ZWARN'],
                            keys='TARGETID', join_type='outer')
        if ztargets.masked:
            unobs = ztargets['NUMOBS'].mask
            ztargets['NUMOBS'][unobs] = 0
    else:
        ztargets = targets.copy()
        ztargets['NUMOBS'] = np.zeros(n, dtype=np.int32)
        ztargets['Z'] = -1 * np.ones(n, dtype=np.float32)
        ztargets['ZWARN'] = -1 * np.ones(n, dtype=np.int32)

    ztargets['NUMOBS_MORE'] = np.maximum(0, calc_numobs(ztargets) - ztargets['NUMOBS'])

    mtl = ztargets.copy()
    ### mtl['NUMOBS_MORE'] = ztargets['NUMOBS_MORE']
    mtl['PRIORITY'] = calc_priority(ztargets)

    #- If priority went to 0, then NUMOBS_MORE should also be 0
    ii = (mtl['PRIORITY'] == 0)
    mtl['NUMOBS_MORE'][ii] = 0

    #- remove extra zcat columns from join(targets, zcat) that are not needed
    #- for final MTL output
    for name in ['NUMOBS', 'Z', 'ZWARN']:
        mtl.remove_column(name)

    #- ELGs can be observed during gray time
    graylayer = np.zeros(n, dtype='i4')
    iselg = (mtl['DESI_TARGET'] & desi_mask.ELG) != 0
    graylayer[iselg] = 1
    mtl['GRAYLAYER'] = graylayer

    if trim:
        notdone = mtl['NUMOBS_MORE'] > 0
        mtl = mtl[notdone]

    #- filtering can reset the fill_value, which is just wrong wrong wrong
    #- See https://github.com/astropy/astropy/issues/4707
    #- and https://github.com/astropy/astropy/issues/4708
    mtl['NUMOBS_MORE'].fill_value = -1

    return mtl
Example #35
class Pan_STARRS_Survey(surveycoord.SurveyCoord):
    """
    A class to access all the catalogs hosted on the
    Vizier database. Inherits from SurveyCoord. This
    is a super class not meant for use by itself and
    instead meant to instantiate specific children
    classes like PAN-STARRS_Survey
    """
    def __init__(self, coord, radius, **kwargs):
        surveycoord.SurveyCoord.__init__(self, coord, radius, **kwargs)

        self.Survey = "Pan_STARRS"

    def get_catalog(self, query_fields=None, release="dr2", table="stack"):
        """
        Query a Pan-STARRS catalog at MAST for
        photometry.

        Args:
            query_fields: list, optional
                A list of query fields to
                get.
            release: str, optional
                "dr1" or "dr2" (default: "dr2").
                Data release version.
            table: str, optional
                "mean","stack" or "detection"
                (default: "stack"). The data table to
                search within.
        
        Returns:
            catalog: astropy.table.Table
                Contains all query results
        """
        assert self.radius <= 0.5 * u.deg, "Cone searches have a maximum radius of 0.5 deg"
        #Validate table and release input
        _check_legal(table, release)
        url = "https://catalogs.mast.stsci.edu/api/v0.1/panstarrs/{:s}/{:s}.csv".format(
            release, table)
        if query_fields is None:
            query_fields = [
                'objID', 'raStack', 'decStack', 'objInfoFlag', 'qualityFlag'
            ]
            query_fields += [
                '{:s}PSFmag'.format(band) for band in PanSTARRS_bands
            ]
            query_fields += [
                '{:s}PSFmagErr'.format(band) for band in PanSTARRS_bands
            ]
        #Validate columns
        _check_columns(query_fields, table, release)
        data = {}
        data['ra'] = self.coord.ra.value
        data['dec'] = self.coord.dec.value
        data['radius'] = self.radius.to(u.deg).value
        data['columns'] = query_fields
        ret = requests.get(url, params=data)
        ret.raise_for_status()
        if len(ret.text) == 0:
            self.catalog = Table()
            self.catalog.meta['radius'] = self.radius
            self.catalog.meta['survey'] = self.survey
            # Validate
            self.validate_catalog()
            return self.catalog.copy()
        photom_catalog = Table.read(ret.text, format="ascii.csv")
        pdict = photom['Pan-STARRS']
        photom_catalog = catalog_utils.clean_cat(photom_catalog, pdict)
        #
        self.catalog = catalog_utils.sort_by_separation(photom_catalog,
                                                        self.coord,
                                                        radec=('ra', 'dec'),
                                                        add_sep=True)
        # Meta
        self.catalog.meta['radius'] = self.radius
        self.catalog.meta['survey'] = self.survey

        #Validate
        self.validate_catalog()

        #Return
        return self.catalog.copy()

    def get_cutout(self, imsize=30 * u.arcsec, filt="irg", output_size=None):
        """
        Grab a color cutout (PNG) from Pan-STARRS

        Args:
            imsize (Quantity):  Angular size of image desired
            filt (str): A string with the three filters to be used
            output_size (int): Output image size in pixels. Defaults
                                to the original cutout size.
        Returns:
            PNG image, None (None for the header).
        """
        assert len(filt) == 3, "Need three filters for a cutout."
        #Sort filters from red to blue
        filt = filt.lower()  # Just in case the user is cheeky about the filter case.
        reffilt = "yzirg"
        idx = np.argsort([reffilt.find(f) for f in filt])
        newfilt = ""
        for i in idx:
            newfilt += filt[i]
        #Get image url
        url = _get_url(self.coord,
                       imsize=imsize,
                       filt=newfilt,
                       output_size=output_size,
                       color=True,
                       imgformat='png')
        self.cutout = images.grab_from_url(url)
        self.cutout_size = imsize
        return self.cutout.copy(), None

    def get_image(self, imsize=30 * u.arcsec, filt="i", timeout=120):
        """
        Grab a fits image from Pan-STARRS in a
        specific band.

        Args:
            imsize (Quantity): Angular size of the image desired
            filt (str): One of 'g','r','i','z','y' (default: 'i')
            timeout (int): Number of seconds to timout the query (default: 120 s)
        Returns:
            hdu: fits header data unit for the downloaded image
        """
        assert len(filt) == 1 and filt in "grizy", \
            "Filter name must be one of 'g','r','i','z','y'"
        url = _get_url(self.coord, imsize=imsize, filt=filt,
                       imgformat='fits')[0]
        imagedat = fits.open(
            astroutils.data.download_file(url,
                                          cache=True,
                                          show_progress=False,
                                          timeout=timeout))[0]
        return imagedat
Example #36
    def injectprev(self, fmll, presetll=None):
        # determine if there are any pre-initialized lines (from a previous run) and set those free parameters
        # make a unique ID for each line
        fmllcode = np.empty(len(fmll),dtype=object)
        for nnn,ill in enumerate(fmll):
            fmllcode[nnn] = "".join(
                [
                str(ill['WL']),
                str(ill['GFLOG']),
                ill['CODE'],
                # str(ill['E']),str(ill['EP']),
                str(ill['XJ']),str(ill['XJP']),
                str(ill['LABEL']),str(ill['LABELP']),
                # ill['REF'],
                # str(ill['ISO1']),str(ill['X1']),
                # str(ill['ISO2']),str(ill['X2']),
                str(ill['ISO1']),
                str(ill['ISO2']),
                ill['OTHER']
                ]
                ).replace(" ","")

        # Read previous table: LINE INFO, DWL, DGFLOG, DGAMMA (will figure out which GAMMA after the fact)
        if presetll is None:
            initlines = '/n/conroyfs1/pac/FAL/data/LL/SL_pars4.h5'
            ilines = Table(np.array(h5py.File(initlines,'r')['data']))
        else:
            initlines = presetll
            ilines_i = h5py.File(initlines,'r')['ll']

            ilines_spaces = np.linspace(0,ilines_i.len(),10,dtype=int)

            for ii in range(len(ilines_spaces)-1):
                ilines_ii = np.array(ilines_i[ilines_spaces[ii]:ilines_spaces[ii+1]-1])
                selind = np.in1d(ilines_ii['WL'],fmll['WL'])
                ilines_iii = Table(ilines_ii[selind])
                if ii == 0:
                    ilines = ilines_iii.copy()
                else:
                    if len(ilines_iii) > 0:
                        ilines = vstack([ilines,ilines_iii])

        # make unique ID for lines in preset linelist
        ilines.sort('WL')
        ilines = ilines[ilines['DWL'] != 0.0]
        ilines['UNIQ_ID'] = np.empty(len(ilines),dtype=object)
        for nnn,ill in enumerate(ilines):
            ilines['UNIQ_ID'][nnn] = "".join(
                [
                str(ill['WL']),
                str(ill['GFLOG']),
                ill['CODE'],
                # str(ill['E']),str(ill['EP']),
                str(ill['XJ']),str(ill['XJP']),
                str(ill['LABEL']),str(ill['LABELP']),
                # ill['REF'],
                # str(ill['ISO1']),str(ill['X1']),
                # str(ill['ISO2']),str(ill['X2']),
                str(ill['ISO1']),
                str(ill['ISO2']),
                ill['OTHER'][1:]
                ]
            ).replace(" ","")

        # cycle through line list and find matches
        numpreset = 0
        for ii,fmlc in enumerate(fmllcode):
            cond_intl = np.in1d(ilines['UNIQ_ID'],fmlc,assume_unique=True)
            if any(cond_intl):
                numpreset = numpreset + 1
                # print("Pro: {0} --> Setting Previous Pars for WL = {1:7.4f}".format(self.ID,float(fmll['WL'][ii])))
                fmll['DWL'][ii]     = float('{0:6.4f}'.format(float(ilines['DWL'][cond_intl])))
                fmll['DGFLOG'][ii]  = float('{0:6.4f}'.format(float(ilines['DGFLOG'][cond_intl])))
                fmll['DGAMMAW'][ii] = float('{0:6.4f}'.format(float(ilines['DGAMMAW'][cond_intl])))
                fmll['DGAMMAR'][ii] = float('{0:6.4f}'.format(float(ilines['DGAMMAR'][cond_intl])))
                fmll['DGAMMAS'][ii] = float('{0:6.4f}'.format(float(ilines['DGAMMAS'][cond_intl])))

        print("Pro: {0} --> Setting Previous Pars for {1} lines from {2}".format(self.ID,numpreset,initlines))

        # clean up some memory (the chunked arrays only exist when a preset
        # line list was supplied)
        del ilines
        if presetll is not None:
            del ilines_i, ilines_ii, ilines_iii

        return fmll
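
A standalone sketch of the matching strategy injectprev relies on: each line is reduced to a string key built from its defining columns, and np.in1d marks which new keys already exist in the preset list (keys below are made up):

import numpy as np

# Keys are concatenations of wavelength, gf-value, etc., as in injectprev.
new_keys = np.array(["5000.1-1.2026.0", "5000.2-0.8026.0"], dtype=object)
preset_keys = np.array(["5000.2-0.8026.0"], dtype=object)
print(np.in1d(new_keys, preset_keys))  # [False  True]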
Example #37
def get_modern_data(manvr, dwell, starcheck):
    catalog = Table(starcheck['cat'])
    catalog.sort('idx')
    # Filter the catalog to be just acquisition stars
    catalog = catalog[(catalog['type'] == 'ACQ') | (catalog['type'] == 'BOT')]
    slot_for_pos = [cat_row['slot'] for cat_row in catalog]
    pos_for_slot = dict([(slot, idx) for idx, slot in enumerate(slot_for_pos)])
    # Also, save out the starcheck index for each slot for later
    index_for_slot = dict([(cat_row['slot'], cat_row['idx'])
                           for cat_row in catalog])

    # Get telemetry
    msids = [
        'AOACASEQ', 'AOACQSUC', 'AOFREACQ', 'AOFWAIT', 'AOREPEAT', 'AOACSTAT',
        'AOACHIBK', 'AOFSTAR', 'AOFATTMD', 'AOACPRGS', 'AOATUPST', 'AONSTARS',
        'AOPCADMD', 'AORFSTR1', 'AORFSTR2', 'AOATTQT1', 'AOATTQT2', 'AOATTQT3',
        'AOATTQT4'
    ]
    per_slot = [
        'AOACQID', 'AOACFCT', 'AOIMAGE', 'AOACMAG', 'AOACYAN', 'AOACZAN',
        'AOACICC', 'AOACIDP', 'AOACIIR', 'AOACIMS', 'AOACIQB', 'AOACISP'
    ]
    slot_msids = [
        field + '%s' % slot for field in per_slot for slot in range(0, 8)
    ]

    start_time = DateTime(manvr.acq_start).secs
    stop_time = DateTime(dwell.start).secs + 100
    raw_eng_data = fetch.MSIDset(msids + slot_msids,
                                 start_time,
                                 stop_time,
                                 filter_bad=True)
    eng_data = Table([raw_eng_data[col].vals for col in msids], names=msids)
    for field in slot_msids:
        eng_data.add_column(Column(name=field, data=raw_eng_data[field].vals))
    # The time column only needs to be built once, outside the loop.
    times = Table([raw_eng_data['AOACASEQ'].times], names=['time'])
    if not len(eng_data['AOACASEQ']):
        raise ValueError("No telemetry for obsid {}".format(manvr.get_obsid()))

    # Estimate the offsets from the expected catalog positions
    dy, dz, star_info = _deltas_vs_obc_quat(eng_data, times['time'], catalog)
    # And add the deltas to the table
    for slot in range(0, 8):
        if slot not in dy:
            continue
        eng_data.add_column(
            Column(name='dy{}'.format(slot), data=dy[slot].data))
        eng_data.add_column(
            Column(name='dz{}'.format(slot), data=dz[slot].data))
        cat_entry = catalog[catalog['slot'] == slot][0]
        dmag = eng_data['AOACMAG{}'.format(slot)] - cat_entry['mag']
        eng_data.add_column(Column(name='dmag{}'.format(slot), data=dmag.data))

    # Get the one-shot delta quaternion and the dot product of the deltas
    delta_quat, dot_q = get_delta_quat(eng_data, times['time'], manvr)
    one_shot_length = np.degrees(2 * np.arccos(dot_q))
    one_shot_length = np.min([one_shot_length, 360 - one_shot_length])
    one_shot_length = one_shot_length * 3600

    # Update a copy of the telemetry structure with quaternions
    # corrected by the one-shot delta
    corr_eng_data = eng_data.copy()
    uncorr_times = (times['time'] < DateTime(manvr.guide_start).secs + 1.0)
    q_orig = Quat(q=np.array([
        eng_data[uncorr_times]['AOATTQT1'], eng_data[uncorr_times]['AOATTQT2'],
        eng_data[uncorr_times]['AOATTQT3'], eng_data[uncorr_times]['AOATTQT4']
    ]).transpose())
    q_corr = q_mult(delta_quat.q, q_orig.q)
    corr_eng_data['AOATTQT1'][uncorr_times] = q_corr.q.transpose()[0]
    corr_eng_data['AOATTQT2'][uncorr_times] = q_corr.q.transpose()[1]
    corr_eng_data['AOATTQT3'][uncorr_times] = q_corr.q.transpose()[2]
    corr_eng_data['AOATTQT4'][uncorr_times] = q_corr.q.transpose()[3]
    corr_dy, corr_dz, si = _deltas_vs_obc_quat(corr_eng_data, times['time'],
                                               catalog)
    # delete the now-extra copy of the data
    del corr_eng_data
    # And add the corrected deltas to the table
    for slot in range(0, 8):
        if slot not in corr_dy:
            continue
        eng_data.add_column(
            Column(name='corr_dy{}'.format(slot), data=corr_dy[slot].data))
        eng_data.add_column(
            Column(name='corr_dz{}'.format(slot), data=corr_dz[slot].data))

    # Also add the acquisition id in a useful way
    for slot in range(0, 8):
        if slot not in pos_for_slot:
            continue
        eng_data.add_column(
            Column(name='POS_ACQID{}'.format(slot),
                   data=eng_data['AOACQID{}'.format(pos_for_slot[slot])]))

    return eng_data, times['time'], one_shot_length, star_info
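
A standalone sketch of the one-shot-length arithmetic above: the rotation angle between two attitude quaternions is 2*arccos(q1 . q2), folded into [0, 180] degrees and converted to arcseconds (the dot product here is illustrative):

import numpy as np

dot_q = 0.999999999  # dot product of the pre- and post-maneuver quaternions
angle = np.degrees(2 * np.arccos(dot_q))
angle = np.min([angle, 360 - angle])  # fold into [0, 180] degrees
print(angle * 3600, "arcsec")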
Example #38
class TestPriorities(unittest.TestCase):
    def setUp(self):
        targdtype = [('DESI_TARGET', np.int64), ('BGS_TARGET', np.int64),
                     ('MWS_TARGET', np.int64), ('PRIORITY_INIT', np.int64),
                     ('NUMOBS_INIT', np.int64)]
        zdtype = [
            ('Z', np.float32),
            ('ZWARN', np.float32),
            ('NUMOBS', np.float32),
        ]

        n = 3

        self.targets = Table(np.zeros(n, dtype=targdtype))
        self.targets['TARGETID'] = list(range(n))

        self.zcat = Table(np.zeros(n, dtype=zdtype))
        self.zcat['TARGETID'] = list(range(n))

    def test_priorities(self):
        """Test that priorities are set correctly for both the main survey and SV.
        """
        # ADM loop through once for SV and once for the main survey.
        for prefix in ["", "SV1_"]:
            t = self.targets.copy()
            z = self.zcat.copy()

            main_names = ['DESI_TARGET', 'BGS_TARGET', 'MWS_TARGET']
            for name in main_names:
                t.rename_column(name, prefix + name)

            # ADM retrieve the mask and column names for this survey flavor.
            colnames, masks, _ = main_cmx_or_sv(t)
            desi_target, bgs_target, mws_target = colnames
            desi_mask, bgs_mask, mws_mask = masks

            # - No targeting bits set is priority=0
            self.assertTrue(np.all(calc_priority(t, z) == 0))

            # - test QSO > (LRG_1PASS | LRG_2PASS) > ELG
            t[desi_target] = desi_mask.ELG
            self.assertTrue(
                np.all(
                    calc_priority(t, z) == desi_mask.ELG.priorities['UNOBS']))
            t[desi_target] |= desi_mask.LRG_1PASS
            self.assertTrue(
                np.all(
                    calc_priority(t, z) == desi_mask.LRG.priorities['UNOBS']))
            t[desi_target] |= desi_mask.LRG_2PASS
            self.assertTrue(
                np.all(
                    calc_priority(t, z) == desi_mask.LRG.priorities['UNOBS']))
            t[desi_target] |= desi_mask.QSO
            self.assertTrue(
                np.all(
                    calc_priority(t, z) == desi_mask.QSO.priorities['UNOBS']))

            # - different states -> different priorities

            # - Done is Done, regardless of ZWARN.
            t[desi_target] = desi_mask.ELG
            t["PRIORITY_INIT"], t["NUMOBS_INIT"] = initial_priority_numobs(t)
            z['NUMOBS'] = [0, 1, 1]
            z['ZWARN'] = [1, 1, 0]
            p = make_mtl(t, z)["PRIORITY"]

            self.assertEqual(p[0], desi_mask.ELG.priorities['UNOBS'])
            self.assertEqual(p[1], desi_mask.ELG.priorities['DONE'])
            self.assertEqual(p[2], desi_mask.ELG.priorities['DONE'])

            # - BGS FAINT targets are never DONE, only MORE_ZGOOD.
            t[desi_target] = desi_mask.BGS_ANY
            t[bgs_target] = bgs_mask.BGS_FAINT
            t["PRIORITY_INIT"], t["NUMOBS_INIT"] = initial_priority_numobs(t)
            z['NUMOBS'] = [0, 1, 1]
            z['ZWARN'] = [1, 1, 0]
            p = make_mtl(t, z)["PRIORITY"]

            self.assertEqual(p[0], bgs_mask.BGS_FAINT.priorities['UNOBS'])
            self.assertEqual(p[1], bgs_mask.BGS_FAINT.priorities['MORE_ZWARN'])
            self.assertEqual(p[2], bgs_mask.BGS_FAINT.priorities['MORE_ZGOOD'])
            # BGS_FAINT: {UNOBS: 2000, MORE_ZWARN: 2000, MORE_ZGOOD: 1000, DONE: 2, OBS: 1, DONOTOBSERVE: 0}

            # - BGS BRIGHT targets are never DONE, only MORE_ZGOOD.
            t[desi_target] = desi_mask.BGS_ANY
            t[bgs_target] = bgs_mask.BGS_BRIGHT
            t["PRIORITY_INIT"], t["NUMOBS_INIT"] = initial_priority_numobs(t)
            z['NUMOBS'] = [0, 1, 1]
            z['ZWARN'] = [1, 1, 0]
            p = make_mtl(t, z)["PRIORITY"]

            self.assertEqual(p[0], bgs_mask.BGS_BRIGHT.priorities['UNOBS'])
            self.assertEqual(p[1],
                             bgs_mask.BGS_BRIGHT.priorities['MORE_ZWARN'])
            self.assertEqual(p[2],
                             bgs_mask.BGS_BRIGHT.priorities['MORE_ZGOOD'])
            # BGS_BRIGHT: {UNOBS: 2100, MORE_ZWARN: 2100, MORE_ZGOOD: 1000, DONE: 2, OBS: 1, DONOTOBSERVE: 0}

            # BGS targets are NEVER done even after 100 observations
            t[desi_target] = desi_mask.BGS_ANY
            t[bgs_target] = bgs_mask.BGS_BRIGHT
            t["PRIORITY_INIT"], t["NUMOBS_INIT"] = initial_priority_numobs(t)
            z['NUMOBS'] = [0, 100, 100]
            z['ZWARN'] = [1, 1, 0]
            p = calc_priority(t, z)

            self.assertEqual(p[0], bgs_mask.BGS_BRIGHT.priorities['UNOBS'])
            self.assertEqual(p[1],
                             bgs_mask.BGS_BRIGHT.priorities['MORE_ZWARN'])
            self.assertEqual(p[2],
                             bgs_mask.BGS_BRIGHT.priorities['MORE_ZGOOD'])

            # BGS ZGOOD targets always have lower priority than MWS targets that
            # are not DONE.
            # ADM first discard N/S informational bits from bitmask as these
            # ADM should never trump the other bits.
            bgs_names = [
                name for name in bgs_mask.names()
                if 'NORTH' not in name and 'SOUTH' not in name
            ]
            mws_names = [
                name for name in mws_mask.names()
                if 'NORTH' not in name and 'SOUTH' not in name
            ]

            lowest_bgs_priority_zgood = min(
                [bgs_mask[n].priorities['MORE_ZGOOD'] for n in bgs_names])

            lowest_mws_priority_unobs = min(
                [mws_mask[n].priorities['UNOBS'] for n in mws_names])
            lowest_mws_priority_zwarn = min(
                [mws_mask[n].priorities['MORE_ZWARN'] for n in mws_names])
            lowest_mws_priority_zgood = min(
                [mws_mask[n].priorities['MORE_ZGOOD'] for n in mws_names])

            lowest_mws_priority = min(lowest_mws_priority_unobs,
                                      lowest_mws_priority_zwarn,
                                      lowest_mws_priority_zgood)

            self.assertLess(lowest_bgs_priority_zgood, lowest_mws_priority)

    def test_bright_mask(self):
        t = self.targets
        z = self.zcat
        t['DESI_TARGET'][0] = desi_mask.ELG
        t['DESI_TARGET'][1] = desi_mask.ELG | desi_mask.NEAR_BRIGHT_OBJECT
        t['DESI_TARGET'][2] = desi_mask.ELG | desi_mask.IN_BRIGHT_OBJECT
        p = calc_priority(t, z)
        self.assertEqual(
            p[0], p[1],
            "NEAR_BRIGHT_OBJECT shouldn't impact priority but {} != {}".format(
                p[0], p[1]))
        self.assertEqual(p[2], -1, "IN_BRIGHT_OBJECT priority not -1")

    def test_mask_priorities(self):
        for mask in [desi_mask, bgs_mask, mws_mask]:
            for name in mask.names():
                if name.startswith('STD') or name in [
                        'BGS_ANY', 'MWS_ANY', 'SECONDARY_ANY',
                        'IN_BRIGHT_OBJECT', 'NEAR_BRIGHT_OBJECT',
                        'BRIGHT_OBJECT', 'SKY', 'SV', 'NO_TARGET'
                ]:
                    self.assertEqual(mask[name].priorities, {},
                                     'mask.{} has priorities?'.format(name))
                else:
                    for state in obsmask.names():
                        self.assertIn(
                            state, mask[name].priorities,
                            '{} not in mask.{}.priorities'.format(state, name))

    def test_cmx_priorities(self):
        """Test that priority calculation can handle commissioning files.
        """
        t = self.targets.copy()
        z = self.zcat

        # ADM restructure the table to look like a commissioning table.
        t.rename_column('DESI_TARGET', 'CMX_TARGET')
        t.remove_column('BGS_TARGET')
        t.remove_column('MWS_TARGET')

        # - No targeting bits set is priority=0
        self.assertTrue(np.all(calc_priority(t, z) == 0))

        # ADM retrieve the cmx_mask.
        colnames, masks, _ = main_cmx_or_sv(t)
        cmx_mask = masks[0]

        # ADM test handling of unobserved SV0_BGS and SV0_MWS.
        for name in ["SV0_BGS", "SV0_MWS"]:
            t['CMX_TARGET'] = cmx_mask[name]
            self.assertTrue(
                np.all(
                    calc_priority(t, z) == cmx_mask[name].priorities['UNOBS']))

        # ADM done is Done, regardless of ZWARN.
        for name in ["SV0_BGS", "SV0_MWS"]:
            t['CMX_TARGET'] = cmx_mask[name]
            t["PRIORITY_INIT"], t["NUMOBS_INIT"] = initial_priority_numobs(t)
            z['NUMOBS'] = [0, 1, 1]
            z['ZWARN'] = [1, 1, 0]
            p = make_mtl(t, z)["PRIORITY"]

            self.assertEqual(p[0], cmx_mask[name].priorities['UNOBS'])
            self.assertEqual(p[1], cmx_mask[name].priorities['DONE'])
            self.assertEqual(p[2], cmx_mask[name].priorities['DONE'])

        # BGS ZGOOD targets always have lower priority than MWS targets that
        # are not DONE.
        lowest_bgs_priority_zgood = cmx_mask['SV0_BGS'].priorities[
            'MORE_ZGOOD']

        lowest_mws_priority_unobs = cmx_mask['SV0_MWS'].priorities['UNOBS']
        lowest_mws_priority_zwarn = cmx_mask['SV0_MWS'].priorities[
            'MORE_ZWARN']
        lowest_mws_priority_zgood = cmx_mask['SV0_MWS'].priorities[
            'MORE_ZGOOD']

        lowest_mws_priority = min(lowest_mws_priority_unobs,
                                  lowest_mws_priority_zwarn,
                                  lowest_mws_priority_zgood)

        self.assertLess(lowest_bgs_priority_zgood, lowest_mws_priority)
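
A standalone sketch of the bitmask pattern these tests exercise: target classes are single bits in one integer column, set with |= and queried with &. The bit values below are illustrative, not the real DESI masks:

import numpy as np

ELG, QSO = 1 << 1, 1 << 2  # made-up bit values
desi_target = np.zeros(3, dtype=np.int64)
desi_target |= ELG      # every row is an ELG
desi_target[0] |= QSO   # row 0 is also a QSO
print((desi_target & QSO) != 0)  # [ True False False]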
Example #39
class TimeTable:

    masked = False

    def setup(self):

        # Initialize table
        self.table = Table(masked=self.masked)

        # Create column with mixed types
        np.random.seed(12345)
        self.table['i'] = np.arange(1000)
        self.table['a'] = np.random.random(1000)  # float
        self.table['b'] = np.random.random(1000) > 0.5  # bool
        self.table['c'] = np.random.random((1000,10))  # 2d column
        self.table['d'] = np.random.choice(np.array(list(string.ascii_letters)),1000)

        self.np_table = np.array(self.table)

        self.extra_row = {'a':1.2, 'b':True, 'c':np.repeat(1, 10), 'd': 'Z'}

        self.extra_column = np.random.randint(0, 100, 1000)

        self.row_indices = np.where(self.table['a'] > 0.9)[0]

        self.table_grouped = self.table.group_by('d')

        # Another table for testing joining
        self.other_table = Table(masked=self.masked)
        self.other_table['i'] = np.arange(1,1000,3)
        self.other_table['f'] = np.random.random()
        self.other_table.sort('f')

        # Another table for testing hstack
        self.other_table_2 = Table(masked=self.masked)
        self.other_table_2['g'] = np.random.random(1000)
        self.other_table_2['h'] = np.random.random((1000, 10))

        self.bool_mask = self.table['a'] > 0.6

    def time_table_slice_bool(self):
        table_subset = self.table[self.bool_mask]

    def time_table_slice_int(self):
        table_subset = self.table[self.row_indices]

    def time_column_slice_bool(self):
        col_subset = self.table['a'][self.bool_mask]

    def time_column_slice_int(self):
        col_subset = self.table['a'][self.row_indices]

    def time_column_get(self):
        self.table['c']

    def time_column_make_bool_mask(self):
        self.table['a'] > 0.6

    def time_multi_column_get(self):
        self.table[('a','c')]

    def time_column_set(self):
        self.table['a'] = 0.

    def time_column_set_all(self):
        self.table['b'][:] = True

    def time_column_set_row_subset(self):
        self.table['b'][self.bool_mask] = True

    def time_column_set_row_subset_int(self):
        self.table['b'][self.row_indices] = True

    def time_row_get(self):
        self.table[300]

    def time_iter_row(self):
        for row in self.table:
            pass

    def time_read_rows(self):
        for row in self.table:
            tuple(row)

    def time_item_get_rowfirst(self):
        self.table[300]['b']

    def time_item_get_colfirst(self):
        self.table['b'][300]

    def time_add_row(self):
        self.table.add_row(self.extra_row)
    time_add_row.number = 1
    time_add_row.repeat = 1

    def time_remove_row(self):
        self.table.remove_row(6)
    time_remove_row.number = 1
    time_remove_row.repeat = 1

    def time_remove_rows(self):
        self.table.remove_rows(self.row_indices)
    time_remove_rows.number = 1
    time_remove_rows.repeat = 1

    def time_add_column(self):
        self.table['e'] = self.extra_column
    time_add_column.number = 1
    time_add_column.repeat = 1

    def time_remove_column(self):
        self.table.remove_column('a')
    time_remove_column.number = 1
    time_remove_column.repeat = 1

    def time_init_from_np_array_no_copy(self):
        Table(self.np_table, copy=False)

    def time_init_from_np_array_copy(self):
        Table(self.np_table, copy=True)

    def time_copy_table(self):
        self.table.copy()

    def time_copy_column(self):
        self.table['a'].copy()

    def time_group(self):
        self.table.group_by('d')

    def time_aggregate(self):
        # Test aggregate with a function that supports reduceat
        self.table_grouped.groups.aggregate(np.sum)

    def time_aggregate_noreduceat(self):
        # Test aggregate with a function that doesn't support reduceat
        self.table_grouped.groups.aggregate(lambda x: np.sum(x))

    def time_sort(self):
        self.table.sort('a')

    def time_join_inner(self):
        join(self.table, self.other_table, keys="i", join_type='inner')

    def time_join_outer(self):
        join(self.table, self.other_table, keys="i", join_type='outer')

    def time_hstack(self):
        hstack([self.table, self.other_table_2])

    def time_vstack(self):
        vstack([self.table, self.table])
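
The time_* methods above follow asv (airspeed velocity) benchmark conventions, where each method is timed by the harness and the .number/.repeat attributes control repetition. A standalone sketch of what one of them measures, outside that harness:

import timeit
import numpy as np
from astropy.table import Table

# Boolean-mask slicing, as in time_table_slice_bool.
table = Table({'a': np.random.random(1000)})
mask = table['a'] > 0.6
print(timeit.timeit(lambda: table[mask], number=100))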
Example #40
        'band',
        'bsens',
        'selfcaliter',
    ])
    tbl.write("fields_summary_table.ecsv", overwrite=True)

    shutil.copy(
        "fields_summary_table.ecsv",
        "/orange/adamginsburg/web/secure/ALMA-IMF/October2020Release/tables/")

    from latex_info import (latexdict, format_float, round_to_n, rounded,
                            rounded_arr, strip_trailing_zeros, exp_to_tex)

    latexdict = latexdict.copy()

    ltbl = tbl.copy()
    ltbl['fields'] = [len(x) for x in tbl['fields']]

    # caption needs to be *before* preamble.
    #latexdict['caption'] = 'Continuum Source IDs and photometry'
    latexdict['header_start'] = r'\label{tab:selfcal_fields}'  # \footnotesize
    latexdict['preamble'] = ('\\caption{Selfcal Field Inclusion}\n'
                             '\\resizebox{\\textwidth}{!}{')
    latexdict['col_align'] = 'l' * len(ltbl.columns)
    latexdict['tabletype'] = 'table*'
    latexdict['tablefoot'] = (
        "}\\par\n"
        "Number of fields included in each self-calibration entry")

    ltbl.write(
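
The example above is cut off mid-call in the source. A standalone sketch of the same pattern using astropy's built-in LaTeX writer (the snippet's latexdict comes from a local latex_info module; only keys the astropy writer understands are used here, and the table contents are illustrative):

from astropy.table import Table

t = Table({'field': ['W43-MM1'], 'band': ['B3']})
t.write('fields.tex', format='ascii.latex', overwrite=True,
        latexdict={'tabletype': 'table*', 'col_align': 'll',
                   'preamble': r'\caption{Selfcal Field Inclusion}'})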
Example #42
def run(cfg):
    """
    Run hugs pipeline using SExtractor for the final detection 
    and photometry.

    Parameters
    ----------
    cfg : hugs_pipe.Config 
        Configuration object which stores all params 
        as well as the exposure object. 

    Returns
    -------
    results : lsst.pipe.base.Struct
        Object containing results:
        results.all_detections : catalog of all detections
        results.sources : catalog of sources we are keeping
        results.hugs_exp : exposure object for this run
        results.exp_clean : cleaned exposure object for this run
        results.success : boolean flag of run status 
    """

    assert cfg.tract and cfg.patch, 'No patch id given!'
    cfg.timer  # start timer

    ############################################################
    # Get masked image and check if we have enough good data
    ############################################################

    try:

        mi = cfg.exp[cfg.band_detect].getMaskedImage()
        mask = mi.getMask()

        if cfg.exp.patch_meta.good_data_frac < cfg.min_good_data_frac:
            cfg.logger.warning('***** not enough data!!! ****')
            results = _null_return(cfg)
            return results

        ############################################################
        # Image thresholding at low and high thresholds. In both
        # cases, the image is smoothed at the psf scale.
        ############################################################

        mi_smooth = imtools.smooth_gauss(mi, cfg.psf_sigma)
        cfg.logger.info('performing low threshold at '
                        '{} sigma'.format(cfg.thresh_low['thresh']))
        fpset_low = prim.image_threshold(mi_smooth,
                                         mask=mask,
                                         plane_name='THRESH_LOW',
                                         **cfg.thresh_low)
        cfg.logger.info('performing high threshold at '
                        '{} sigma'.format(cfg.thresh_high['thresh']))
        fpset_high = prim.image_threshold(mi_smooth,
                                          mask=mask,
                                          plane_name='THRESH_HIGH',
                                          **cfg.thresh_high)

        ############################################################
        # Get "cleaned" image, with noise replacement
        ############################################################

        cfg.logger.info('generating cleaned exposure')
        exp_clean = prim.clean(cfg.exp[cfg.band_detect], fpset_low,
                               **cfg.clean)
        mi_clean = exp_clean.getMaskedImage()
        mask_clean = mi_clean.getMask()

        ############################################################
        # Detect sources and measure props with SExtractor
        ############################################################

        cfg.logger.info('detecting in {}-band'.format(cfg.band_detect))
        label = '{}-{}-{}'.format(cfg.tract, cfg.patch[0], cfg.patch[-1])

        cfg.logger.info('cleaning non-detection bands')
        replace = cfg.exp.get_mask_array(cfg.band_detect)
        for band in cfg.bands:
            if band != cfg.band_detect:
                mi_band = cfg.exp[band].getMaskedImage()
                noise_array = utils.make_noise_image(mi_band, cfg.rng)
                mi_band.getImage().getArray()[replace] = noise_array[replace]

        sources = Table()

        for band in cfg.bands:
            cfg.logger.info('measuring in {}-band'.format(band))
            dual_exp = None if band == cfg.band_detect else cfg.exp[band]
            sources_band = prim.detect_sources(
                exp_clean,
                cfg.sex_config,
                cfg.sex_io_dir,
                label=label,
                dual_exp=dual_exp,
                delete_created_files=cfg.delete_created_files,
                original_fn=cfg.exp.fn[cfg.band_detect])
            if len(sources_band) > 0:
                sources = hstack([sources, sources_band])
            else:
                cfg.logger.warn('**** no sources found by sextractor ****')
                results = _null_return(cfg, exp_clean)
                return results

        ############################################################
        # Verify detections in other bands using SExtractor
        ############################################################

        all_detections = sources.copy()

        for band in cfg.band_verify:
            cfg.logger.info('verifying detection in {}-band'.format(band))
            sources_verify = prim.detect_sources(
                cfg.exp[band],
                cfg.sex_config,
                cfg.sex_io_dir,
                label=label,
                delete_created_files=cfg.delete_created_files,
                original_fn=cfg.exp.fn[cfg.band_detect])
            if len(sources_verify) > 0:
                match_masks, _ = xmatch(sources,
                                        sources_verify,
                                        max_sep=cfg.verify_max_sep)
                txt = 'cuts: {} out of {} objects detected in {}-band'.format(
                    len(match_masks[0]), len(sources), band)
                cfg.logger.info(txt)
                if len(match_masks[0]) == 0:
                    cfg.logger.warn('**** no matched sources with ' + band +
                                    ' ****')
                    results = _null_return(cfg, exp_clean)
                    return results
                sources = sources[match_masks[0]]
            else:
                cfg.logger.warn('**** no sources detected in ' + band +
                                ' ****')
                results = _null_return(cfg, exp_clean)
                return results

        mask_fracs = utils.calc_mask_bit_fracs(exp_clean)
        cfg.exp.patch_meta.cleaned_frac = mask_fracs['cleaned_frac']
        cfg.exp.patch_meta.bright_obj_frac = mask_fracs['bright_object_frac']

        cfg.logger.info('task completed in {:.2f} min'.format(cfg.timer))
        results = Struct(all_detections=all_detections,
                         sources=sources,
                         hugs_exp=cfg.exp,
                         exp_clean=exp_clean,
                         success=True,
                         synths=cfg.exp.synths)

        cfg.reset_mask_planes()
        return results

    except Exception as e:
        cfg.logger.critical('tract - patch {} - {} failed: {}'.format(
            cfg.tract, cfg.patch, e))
        results = _null_return(cfg)
        return results
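
A standalone sketch of the per-band accumulation in the run above: hstack places the per-band measurement columns side by side for the same source rows (column names are illustrative):

from astropy.table import Table, hstack

g = Table({'flux_g': [1.0, 2.0]})
r = Table({'flux_r': [0.8, 1.7]})
print(hstack([g, r]))  # one table with columns flux_g and flux_r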
Example #43
class TestMTL(unittest.TestCase):
    def setUp(self):
        self.targets = Table()
        self.types = np.array(['ELG', 'LRG', 'QSO', 'QSO', 'ELG'])
        self.priorities = [Mx[t].priorities['UNOBS'] for t in self.types]
        self.post_prio = [Mx[t].priorities['MORE_ZGOOD'] for t in self.types]
        self.post_prio[0] = 2  # ELG
        self.post_prio[1] = 2  # LRG...all one-pass
        self.post_prio[2] = 2  # lowz QSO
        self.targets['DESI_TARGET'] = [Mx[t].mask for t in self.types]
        self.targets['BGS_TARGET'] = np.zeros(len(self.types), dtype=np.int64)
        self.targets['MWS_TARGET'] = np.zeros(len(self.types), dtype=np.int64)
        n = len(self.targets)
        self.targets['ZFLUX'] = 10**((22.5 - np.linspace(20, 22, n)) / 2.5)
        self.targets['TARGETID'] = list(range(n))
        # ADM determine the initial PRIORITY and NUMOBS.
        pinit, ninit = initial_priority_numobs(self.targets)
        self.targets["PRIORITY_INIT"] = pinit
        self.targets["NUMOBS_INIT"] = ninit

        # - reverse the order for zcat to make sure joins work
        self.zcat = Table()
        self.zcat['TARGETID'] = self.targets['TARGETID'][-2::-1]
        self.zcat['Z'] = [2.5, 1.0, 0.5, 1.0]
        self.zcat['ZWARN'] = [0, 0, 0, 0]
        self.zcat['NUMOBS'] = [1, 1, 1, 1]
        self.zcat['SPECTYPE'] = ['QSO', 'QSO', 'GALAXY', 'GALAXY']

    def reset_targets(self, prefix):
        """Add prefix to TARGET columns"""

        t = self.targets.copy()
        main_names = ['DESI_TARGET', 'BGS_TARGET', 'MWS_TARGET']

        if prefix == 'CMX':
            # ADM restructure the table to look like a commissioning table.
            t.rename_column('DESI_TARGET', 'CMX_TARGET')
            t.remove_column('BGS_TARGET')
            t.remove_column('MWS_TARGET')
        else:
            for name in main_names:
                t.rename_column(name, prefix + name)


        # if prefix == "SV1_":
        #     # ADM change any occurrences of LRG_2PASS to just LRG.
        #     # ADM technically not needed as 2PASS no longer exists.
        #     ii = t["SV1_DESI_TARGET"] == Mx.LRG_2PASS
        #     t["SV1_DESI_TARGET"][ii] = MxSV.LRG

        return t

    def test_mtl(self):
        """Test output from MTL has the correct column names.
        """
        # ADM loop through once each for the main survey, commissioning and SV.
        for prefix in ["", "CMX_", "SV1_"]:
            t = self.reset_targets(prefix)
            mtl = make_mtl(t, "BRIGHT|GRAY|DARK")
            goodkeys = sorted(
                set(t.dtype.names)
                | set(['NUMOBS_MORE', 'PRIORITY', 'OBSCONDITIONS']))
            mtlkeys = sorted(mtl.dtype.names)
            self.assertEqual(mtlkeys, goodkeys)

    def test_numobs(self):
        """Test priorities, numobs and obsconditions are set correctly with no zcat.
        """
        # ADM loop through once for SV and once for the main survey.
        for prefix in ["", "SV1_"]:
            t = self.reset_targets(prefix)
            mtl = make_mtl(t, "GRAY|DARK")
            mtl.sort(keys='TARGETID')
            self.assertTrue(np.all(mtl['NUMOBS_MORE'] == [1, 1, 4, 4, 1]))
            self.assertTrue(np.all(mtl['PRIORITY'] == self.priorities))
            # - Check that ELGs can be observed in gray conditions but not others
            iselg = (self.types == 'ELG')
            self.assertTrue(
                np.all(
                    (mtl['OBSCONDITIONS'][iselg] & obsconditions.GRAY) != 0))
            self.assertTrue(
                np.all((mtl['OBSCONDITIONS'][~iselg]
                        & obsconditions.GRAY) == 0))

    def test_zcat(self):
        """Test priorities, numobs and obsconditions are set correctly after zcat.
        """
        # ADM loop through once for SV and once for the main survey.
        for prefix in ["", "SV1_"]:
            t = self.reset_targets(prefix)
            mtl = make_mtl(t, "DARK|GRAY", zcat=self.zcat, trim=False)
            mtl.sort(keys='TARGETID')
            pp = self.post_prio.copy()
            nom = [0, 0, 0, 3, 1]
            # ADM in SV, all quasars get all observations.
            if prefix == "SV1_":
                pp[2], nom[2] = pp[3], nom[3]
            self.assertTrue(np.all(mtl['PRIORITY'] == pp))
            self.assertTrue(np.all(mtl['NUMOBS_MORE'] == nom))
            # - change one target to a SAFE (BADSKY) target and confirm priority=0 not 1
            t[prefix + 'DESI_TARGET'][0] = Mx.BAD_SKY
            mtl = make_mtl(t, "DARK|GRAY", zcat=self.zcat, trim=False)
            mtl.sort(keys='TARGETID')
            self.assertEqual(mtl['PRIORITY'][0], 0)

    def test_mtl_io(self):
        """Test MTL correctly handles masked NUMOBS quantities.
        """
        # ADM loop through once for SV and once for the main survey.
        for prefix in ["", "SV1_"]:
            t = self.reset_targets(prefix)
            mtl = make_mtl(t, "BRIGHT", zcat=self.zcat, trim=True)
            testfile = 'test-aszqweladfqwezceas.fits'
            mtl.write(testfile, overwrite=True)
            x = mtl.read(testfile)
            os.remove(testfile)
            if x.masked:
                self.assertTrue(
                    np.all(mtl['NUMOBS_MORE'].mask == x['NUMOBS_MORE'].mask))
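
A standalone sketch of why setUp reverses the zcat row order: the joins exercised by make_mtl are key-based, so input row order must not affect the result:

from astropy.table import Table, join

targets = Table({'TARGETID': [0, 1, 2], 'X': [10, 11, 12]})
zcat = Table({'TARGETID': [2, 1, 0], 'Z': [0.5, 1.0, 2.5]})
# Rows are matched on TARGETID regardless of their order in either table.
print(join(targets, zcat, keys='TARGETID'))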
Example #44
def run(cfg, reset_mask_planes=True):
    """
    Run hugs pipeline using SExtractor for the final detection 
    and photometry.

    Parameters
    ----------
    cfg : hugs_pipe.Config 
        Configuration object which stores all params 
        as well as the exposure object. 

    Returns
    -------
    results : lsst.pipe.base.Struct
        Object containing results:
        results.all_detections : catalog of all detections
        results.sources : catalog of sources we are keeping
        results.exp : exposure object for this run
        results.exp_clean : cleaned exposure object for this run
        results.success : boolean flag of run status 
    """

    assert cfg.tract and cfg.patch, 'No patch id given!'
    cfg.timer # start timer

    ############################################################
    # Get masked image and check if we have enough good data
    ############################################################

    mi = cfg.exp[cfg.band_detect].getMaskedImage()
    mask = mi.getMask()
    stat_task = get_clipped_sig_task()

    if cfg.exp.patch_meta.good_data_frac < cfg.min_good_data_frac:
        cfg.logger.warning('***** not enough data!!! ****')
        results = _null_return(cfg)
        return results

    ############################################################
    # Image thresholding at low and high thresholds. In both
    # cases, the image is smoothed at the psf scale.
    ############################################################
        
    #mi_smooth = imtools.smooth_gauss(mi, cfg.psf_sigma)
    stats = stat_task.run(mi)
    # NOTE: zpt (photometric zero point) and pixscale (arcsec / pixel) are
    # assumed to be defined at module level in the original source.
    flux_th = 10**(0.4 * (zpt - cfg.thresh_low['thresh'])) * pixscale**2
    cfg.thresh_low['thresh'] = flux_th / stats.stdev
    cfg.logger.info('performing low threshold at '
                    '{:.2f} sigma'.format(cfg.thresh_low['thresh']))
    fpset_low = prim.image_threshold(
        mi, mask=mask, plane_name='THRESH_LOW', **cfg.thresh_low)
    flux_th = 10**(0.4 * (zpt - cfg.thresh_high['thresh'])) * pixscale**2
    cfg.thresh_high['thresh'] = flux_th / stats.stdev    
    cfg.logger.info('performing high threshold at '
                    '{:.2f} sigma'.format(cfg.thresh_high['thresh']))
    fpset_high = prim.image_threshold(
        mi, mask=mask, plane_name='THRESH_HIGH', **cfg.thresh_high)

    ############################################################
    # Get "cleaned" image, with noise replacement
    ############################################################

    cfg.logger.info('generating cleaned exposure')
    exp_clean = prim.clean(cfg.exp[cfg.band_detect], fpset_low, **cfg.clean)
    mi_clean = exp_clean.getMaskedImage()
    mask_clean = mi_clean.getMask()

    ############################################################
    # Detect sources and measure props with SExtractor
    ############################################################

    cfg.logger.info('detecting in {}-band'.format(cfg.band_detect))
    label = '{}-{}-{}'.format(cfg.tract, cfg.patch[0], cfg.patch[-1])

    cfg.logger.info('cleaning non-detection bands')
    replace = cfg.exp.get_mask_array(cfg.band_detect)
    for band in cfg.bands:
        if band!=cfg.band_detect:
            mi_band = cfg.exp[band].getMaskedImage()
            noise_array = utils.make_noise_image(mi_band, cfg.rng)
            mi_band.getImage().getArray()[replace] = noise_array[replace]

    sources = Table()

    for band in cfg.bands:
        cfg.logger.info('measuring in {}-band'.format(band))
        dual_exp = None if band==cfg.band_detect else cfg.exp[band]
        sources_band = prim.detect_sources(
            exp_clean, cfg.sex_config, cfg.sex_io_dir, label=label, 
            dual_exp=dual_exp, delete_created_files=cfg.delete_created_files, 
            original_fn=cfg.exp.fn[cfg.band_detect]) 
        if len(sources_band)>0:
            sources = hstack([sources, sources_band])
        else:
            cfg.logger.warn('**** no sources found by sextractor ****')
            results = _null_return(cfg, exp_clean)
            return results

    ############################################################
    # Verify detections in other bands using SExtractor
    ############################################################

    all_detections = sources.copy()

    for band in cfg.band_verify:
        cfg.logger.info('verifying detection in {}-band'.format(band))
        sources_verify = prim.detect_sources(
            cfg.exp[band], cfg.sex_config, cfg.sex_io_dir,
            label=label, delete_created_files=cfg.delete_created_files, 
            original_fn=cfg.exp.fn[band])
        if len(sources_verify)>0:
            match_masks, _ = xmatch(
                sources, sources_verify, max_sep=cfg.verify_max_sep)
            txt = 'cuts: {} out of {} objects detected in {}-band'.format(
                len(match_masks[0]), len(sources), band)
            cfg.logger.info(txt)
            if len(match_masks[0])==0:
                cfg.logger.warn('**** no matched sources with '+band+' ****')
                results = _null_return(cfg, exp_clean)
                return results
            sources = sources[match_masks[0]]
        else:
            cfg.logger.warn('**** no sources detected in '+band+' ****')
            results = _null_return(cfg, exp_clean)
            return results

    mask_fracs = utils.calc_mask_bit_fracs(exp_clean)
    cfg.exp.patch_meta.cleaned_frac = mask_fracs['cleaned_frac']
    cfg.exp.patch_meta.bright_obj_frac = mask_fracs['bright_object_frac']

    cfg.logger.info('task completed in {:.2f} min'.format(cfg.timer))
    results = Struct(all_detections=all_detections,
                     sources=sources,
                     exp=cfg.exp,
                     exp_clean=exp_clean,
                     success=True)

    if reset_mask_planes:
        cfg.reset_mask_planes()

    return results
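
A standalone sketch of the threshold conversion used above: a surface-brightness limit in mag/arcsec^2 is turned into counts per pixel via the zero point and pixel scale, then into units of the image noise sigma (all numbers below are illustrative):

zpt = 27.0         # photometric zero point (mag)
pixscale = 0.168   # arcsec / pixel
thresh_mag = 28.5  # surface-brightness threshold (mag / arcsec^2)
stdev = 0.02       # clipped image noise (counts)
flux_th = 10 ** (0.4 * (zpt - thresh_mag)) * pixscale ** 2
print('threshold in sigma:', flux_th / stdev)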
Example #45
       """
       Stellar Fits
       """
       basename='{0}.{1}'.format('pickles', i+1)
       stellar = Table.read('{0}/{1}.zout'.format(folder, basename), format='ascii.commented_header')
       star_best = np.zeros(len(stellar), dtype='S6')
       star_best[stellar['temp_1'] >= -90] = st_names[stellar['temp_1']-1][stellar['temp_1'] >= -90]
       
       schi = stellar['chi_1']/(stellar['nfilt']-1)
       schi[stellar['chi_1'] == -99.] = -99.
       sub_cat['chi_r_stellar'] = schi
       
       sub_cat['stellar_type'] = star_best
       
       if i == 0:
           full_cat = Table.copy(sub_cat)
       else:
           full_cat = vstack([full_cat, sub_cat])
       bar.update()
 
   folder = '{0}/full'.format(pipe_params.working_folder)
   path = '{0}/photoz_all_merged.fits'.format(folder)
   if os.path.isfile(path):
       os.remove(path)
   full_cat.write(path, format='fits')
 
   chis = np.array([full_cat['chi_r_eazy'], full_cat['chi_r_cosmos'], full_cat['chi_r_atlas']])
   chi_best = np.min(chis, axis=0)
   
   
   hdf_hb.close()
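
A standalone sketch of the accumulation pattern above: copy the first chunk, then vstack each later chunk onto the growing catalog:

from astropy.table import Table, vstack

chunks = [Table({'x': [1, 2]}), Table({'x': [3]}), Table({'x': [4, 5]})]
full_cat = chunks[0].copy()
for sub in chunks[1:]:
    full_cat = vstack([full_cat, sub])
print(len(full_cat))  # 5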
Example #46
    def from_parsec(file_name, log_age=None, zini=None, num_filters=None):
        """
        Read isochrone generated from the `PARSEC website
        <http://stev.oapd.inaf.it/cgi-bin/cmd>`_. If more than one age and/or
        metallicity is included in the file, you must provide the log_age
        and/or zini parameters.


        Parameters
        ----------
        file_name : str
            Isochrone file name.
        log_age : float, optional
            Log of age in years. You must provide this parameter if there is
            more than one age in the isochrone file. Note this function does
            not interpolate ages, so it must be included in the file.
        zini : float, optional
            Initial metal fraction. You must provide this parameter if there
            is more than one metallicity in the isochrone file. Note this
            function does not interpolate metallicity, so it must be included
            in the file.
        num_filters : int, optional
            Number of filters included in the isochrone file. If None, will
            assume the last non-filter parameter is `mbolmag`.

        Returns
        -------
        iso : `artpop.stars.Isochrone`
            PARSEC Isochrone object.
        """
        if os.path.isfile(file_name):
            with open(file_name, 'r') as file:
                lines = file.readlines()
            for l in lines:
                tokens = l.split()
                # guard against blank or single-token lines in the header
                if len(tokens) > 1 and 'Zini' in tokens[1]:
                    names = tokens[1:]
                    break
            data = np.loadtxt(file_name)
            parsec = Table(data, names=names)
        else:
            raise Exception(f'{file_name} does not exist.')
        isochrone_full = parsec.copy()
        if log_age is not None:
            age_cut = np.abs(parsec['logAge'] - log_age) < 1e-5
            if age_cut.sum() < 1:
                raise Exception(f'log_age = {log_age} not found.')
            parsec = parsec[age_cut]
        if zini is not None:
            zini_cut = np.abs(parsec['Zini'] - zini) < 1e-8
            if zini_cut.sum() < 1:
                raise Exception(f'Zini = {zini} not found.')
            parsec = parsec[zini_cut]
        if num_filters is None:
            filt_idx = np.argwhere(np.array(names) == 'mbolmag')[0][0] + 1
        else:
            filt_idx = len(names) - num_filters
        iso = Isochrone(mini=parsec['Mini'],
                        mact=parsec['Mass'],
                        mags=parsec[names[filt_idx:]],
                        log_L=parsec['logL'],
                        log_Teff=parsec['logTe'])
        iso.isochrone_full = isochrone_full
        return iso
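
A standalone sketch of the row selection in from_parsec: ages and metallicities are matched with a small numeric tolerance rather than ==, since the file stores floating-point grids (values below are illustrative):

import numpy as np
from astropy.table import Table

parsec = Table({'logAge': [8.0, 9.0, 10.0], 'Zini': [0.0152, 0.0152, 0.0152]})
age_cut = np.abs(parsec['logAge'] - 9.0) < 1e-5
print(parsec[age_cut])  # only the logAge == 9.0 rows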
Example #47
    def run_final_calibrations(self, initial_priors='parametric'):
        self.generate_evolution_tables()
        output_names = [
            'calib coefs', 'fit variances', 'wavelengths', 'pixels'
        ]
        all_output_names = [
            'calib coefs', 'fit variances', 'wavelengths', 'pixels', 'linelist'
        ]
        mock_spec_w, mock_spec_f = self.mock_spec_w, self.mock_spec_f

        if not self.do_fine_calib:
            print(
                "There doesn't seem to be a fine calibration defined. Using the supplied coarse calibs"
            )

        select_lines = True

        dev_allowance = 1.
        devs = 2.
        using_defaults = False
        if initial_priors == 'defaults':
            if self.default_calibration_coefs is None:
                print(
                    "Couldn't find the default calibration coefficients, so using a parametrization of the coarse coefs"
                )
                initial_coef_table = Table(
                    self.get_parametricfits_of(caltype='coarse'))
            else:
                need_to_parametrize = False
                initial_coef_table = Table(self.default_calibration_coefs)
                for fib in self.instrument.full_fibs[self.camera]:
                    if fib not in initial_coef_table.colnames:
                        need_to_parametrize = True
                        break
                if need_to_parametrize:
                    paramd_table = Table(
                        self.get_parametricfits_of(caltype='default'))
                    for fib in self.instrument.full_fibs[self.camera]:
                        if fib not in initial_coef_table.colnames:
                            initial_coef_table[fib] = paramd_table[fib]
                using_defaults = True
        elif initial_priors == 'medians':
            initial_coef_table = Table(
                self.get_medianfits_of(self.coarse_calibration_coefs))
        else:
            initial_coef_table = Table(
                self.get_parametricfits_of(caltype='coarse'))

        for pairnum, filnums in self.pairings.items():
            if pairnum > 0:
                coarse_table_differences = self.evolution_in_coarse_coefs[
                    pairnum]
                for column in coarse_table_differences.colnames:
                    initial_coef_table[column] = initial_coef_table[
                        column] + coarse_table_differences[column]
            ## HACK!!
            if pairnum == 0 and self.camera == 'r':
                continue
            ## END HACK!
            filenum = filnums[self.filenum_ind]
            data = Table(self.fine_calibrations[filenum].data)

            linelist = self.selected_lines

            effective_iteration = pairnum  #np.max([pairnum,int(using_defaults)])
            if effective_iteration == 0:
                user_input = 'some'
            elif effective_iteration == 1:
                user_input = 'minimal'
            elif effective_iteration > 1:  # and devs < dev_allowance:
                user_input = 'single'  #'none'

            hand_fit_subset = []
            cam = self.camera
            if user_input == 'all':
                hand_fit_subset = list(initial_coef_table.colnames)
            elif user_input in ['some', 'minimal', 'single']:
                if cam == 'r':
                    # specific_set = [cam + '101', cam + '416']
                    specific_set = [
                        cam + '101', cam + '816', cam + '416', cam + '501'
                    ]
                else:
                    specific_set = [
                        cam + '116', cam + '801', cam + '516', cam + '401'
                    ]
                for i, fib in enumerate(specific_set):
                    outfib = ensure_match(fib, data.colnames, hand_fit_subset,
                                          cam)
                    hand_fit_subset.append(outfib)

                if user_input == 'some':
                    seed = int(filenum)
                    np.random.seed(seed)
                    randfibs = [
                        '{:02d}'.format(x)
                        for x in np.random.randint(1, 16, 4)
                    ]
                    for tetn, fibn in zip([2, 3, 6, 7], randfibs):
                        fib = '{}{}{}'.format(cam, tetn, fibn)
                        outfib = ensure_match(fib, data.colnames,
                                              hand_fit_subset, cam)
                        hand_fit_subset.append(outfib)
                elif user_input == 'single':
                    hand_fit_subset = hand_fit_subset[:1]
            else:
                pass

            # hand_fit_subset = np.asarray(hand_fit_subset)

            ##HACK!
            # if pairnum == 0 and self.camera=='r':
            #     altered_coef_table = initial_coef_table.copy()
            #     hand_fit_subset = np.asarray(['r101','r816','r416','r501','r210','r602','r715'])
            #     altered_coef_table = {}#initial_coef_table.copy()
            #     altered_coef_table['r101'] = [5071.8187300612035, 0.9930979838081959, -5.769775729541421e-06,
            #                                   1.6219475654346627e-08, -1.060536238512127e-11, 2.027614894968671e-15]
            #
            #     altered_coef_table['r816'] = [5064.941399949152, 0.9887048293667995, 4.829092351762018e-06,
            #                                   5.280389577236655e-09, -5.618906483279477e-12, 1.1981097537960155e-15]
            #
            #     altered_coef_table['r416'] = [4966.43139830805, 0.9939388787553181, 5.244911711992524e-06,
            #                                   1.2291548669411035e-09, - 2.0296595329597448e-12, 2.9050877132565224e-16]
            #
            #     altered_coef_table['r501'] = [4965.341783218052, 0.9873531089008049, 2.4560812264245633e-05,
            #                                   -2.0293237635901715e-08, 8.081202360788054e-12, -1.397383927434781e-15]
            #
            #     altered_coef_table['r210'] = [5009.879532180203, 0.986418938077269,
            #                                   2.1117286784979934e-05, - 1.612921025968839e-08, 6.307242237439978e-12,
            #                                   -1.175841190977326e-15]
            #
            #     altered_coef_table['r309'] = [4981.847585300046, 0.9953409249278389, 6.616819915490353e-09,
            #                                   7.072942793437885e-09, -4.7799815890757634e-12, 7.369734622022845e-16]
            #
            #     altered_coef_table['r602'] = [4975.080088016758, 0.9916173886456268, 7.811003804278236e-06,
            #                                   1.1977785560589788e-09, -3.3762927213375386e-12, 7.593041888780153e-16]
            #
            #     altered_coef_table['r715'] = [5014.023681360571, 0.99147302071155, 4.748885129798807e-06,
            #                                   3.1454713162197196e-09, -3.4683774647827705e-12, 6.101876288746191e-16]
            #     handfit_fitting_dict = {}
            #     handfit_fitting_dict['calib coefs'] = altered_coef_table
            #     wm,fm = linelist['ThAr']
            # else:
            #     if pairnum==1 and self.camera=='r':
            #         # altered_coef_table,thetype = self.filemanager.locate_calib_dict(fittype='full-ThAr', camera=self.camera,
            #         #                                                  config=self.config, filenum=filenum)
            #         # print(thetype, altered_coef_table)
            #         altered_coef_table = self.filemanager.load_calib_dict(fittype='full-ThAr',cam=self.camera,config=self.config,filenum=1490,timestamp=679621)
            #         initial_coef_table = Table(altered_coef_table['CALIB COEFS'].data)
            ## End HACK!


            handfit_fitting_dict, wm, fm  = \
                                    wavelength_fitting_by_line_selection(data, initial_coef_table,\
                                    self.all_lines, linelist, self.mock_spec_w, self.mock_spec_f ,\
                                    select_lines=select_lines,save_plots=self.save_plots,savetemplate_funcs=self.savetemplate_funcs,\
                                    filenum=filenum,subset=hand_fit_subset,completed_coefs={})

            if select_lines:
                linelistdict = {'ThAr': (wm, fm)}
            else:
                linelistdict = self.selected_lines

            if self.single_core:
                full_fitting_dict, badfits = \
                    auto_wavelength_fitting_by_lines(data, initial_coef_table, handfit_fitting_dict['calib coefs'].copy(), self.all_lines, linelistdict.copy(),\
                                                          mock_spec_w=mock_spec_w,  mock_spec_f=mock_spec_f,\
                                                          filenum=filenum, \
                                                          save_plots=self.save_plots, savetemplate_funcs=self.savetemplate_funcs)

                # for datainfoname,datainfo in handfit_fitting_dict.items():
                #     for fib in datainfo.keys():
                #         full_fitting_dict[datainfoname][fib] = datainfo[fib]

                badfits = np.array(badfits)
            else:
                fib1s = self.instrument.lower_half_fibs[self.camera]
                fib2s = self.instrument.upper_half_fibs[self.camera]

                obs1 = {
                    'comp': data[fib1s.tolist()], 'fulllinelist': self.all_lines.copy(),
                    'coarse_coefs': initial_coef_table, 'linelistdict': linelistdict.copy(),
                    'mock_spec_w': mock_spec_w.copy(), 'mock_spec_f': mock_spec_f.copy(),
                    'out_coefs': handfit_fitting_dict['calib coefs'].copy(), 'filenum': filenum,
                    'save_plots': self.save_plots, 'savetemplate_funcs': self.savetemplate_funcs
                }
                obs2 = {
                    'comp': data[fib2s.tolist()], 'fulllinelist': self.all_lines.copy(),
                    'coarse_coefs': initial_coef_table.copy(), 'linelistdict': linelistdict.copy(),
                    'mock_spec_w': mock_spec_w.copy(), 'mock_spec_f': mock_spec_f.copy(),
                    'out_coefs': handfit_fitting_dict['calib coefs'].copy(), 'filenum': filenum,
                    'save_plots': self.save_plots, 'savetemplate_funcs': self.savetemplate_funcs
                }

                all_obs = [obs1, obs2]
                NPROC = np.clip(len(all_obs), 1, 4)

                with Pool(NPROC) as pool:
                    tabs = pool.map(auto_wavelength_fitting_by_lines_wrapper,
                                    all_obs)
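
                ## For reference: the wrapper handed to pool.map presumably just
                ## unpacks each kwargs dict before calling the auto-fitter
                ## (a sketch, not the module's actual definition):
                # def auto_wavelength_fitting_by_lines_wrapper(kwargs):
                #     return auto_wavelength_fitting_by_lines(**kwargs)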

                full_fitting_dict, badfits = tabs[0]
                full_fitting_dict2, badfits2 = tabs[1]

                # ## The hand fit calibrations are in both returned dicts, remove from the second
                # ## Assign the other calibration info from hand fits to the output dicts
                # for datainfoname, datainfo in handfit_fitting_dict.items():
                #     ## use the autofitted wavelength solution even for hand fits, note we're not
                #     ## assigning these values to the output array
                #     if 'coef' in datainfoname:
                #         for fib in datainfo.keys():
                #             full_fitting_dict2[datainfoname].pop(fib)
                #     else:
                #         for fib in datainfo.keys():
                #             full_fitting_dict[datainfoname][fib] = datainfo[fib]

                ## Merge the second process's results into the first. Entries
                ## present in both dicts (e.g. the hand-fit calibrations that
                ## were passed to both workers) are overwritten by the second's.
                for datainfoname, datainfo in full_fitting_dict2.items():
                    for fib in datainfo.keys():
                        full_fitting_dict[datainfoname][fib] = datainfo[fib]

                badfits = np.unique(np.append(badfits, badfits2))

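            ## Fibers flagged as bad by the automatic fit get a second,
            ## hand-guided pass, seeded with the coefficients that did converge.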
            handfit_bad_subset_dict, wm, fm = wavelength_fitting_by_line_selection(
                data, initial_coef_table, self.all_lines, linelistdict,
                self.mock_spec_w, self.mock_spec_f,
                select_lines=select_lines, save_plots=self.save_plots,
                savetemplate_funcs=self.savetemplate_funcs,
                filenum=filenum, subset=badfits,
                completed_coefs=full_fitting_dict['calib coefs'].copy())
            for datainfoname, datainfo in handfit_bad_subset_dict.items():
                for fib in datainfo.keys():
                    full_fitting_dict[datainfoname][fib] = datainfo[fib]

            if select_lines:
                self.selected_lines = full_fitting_dict['linelist'].copy()
                select_lines = False

            ## Zero-pad the per-fiber wavelength/pixel arrays to a common
            ## length so building the output table doesn't fail on unequal sizes
            maxlams = int(
                np.max([
                    len(full_fitting_dict['wavelengths'][fib])
                    for fib in full_fitting_dict['wavelengths'].keys()
                ]))

            for fib in full_fitting_dict['wavelengths'].keys():
                nlams = len(full_fitting_dict['wavelengths'][fib])
                if nlams != maxlams:
                    full_fitting_dict['wavelengths'][fib] = np.append(
                        full_fitting_dict['wavelengths'][fib],
                        np.zeros(maxlams - nlams))
                    full_fitting_dict['pixels'][fib] = np.append(
                        full_fitting_dict['pixels'][fib],
                        np.zeros(maxlams - nlams))

            ## Create hdulist to export
            out_hdus = [
                fits.PrimaryHDU(header=self.fine_calibrations[filenum].header)
            ]
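            ## One BinTableHDU per output product, with one column per fiber.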
            for out_name in output_names:
                curtab = Table()
                curdict = full_fitting_dict[out_name]
                for key in self.instrument.full_fibs[self.camera]:
                    curtab.add_column(Table.Column(data=curdict[key],
                                                   name=key))

                out_hdus.append(
                    fits.BinTableHDU(data=curtab.copy(), name=out_name))

            hdulist = fits.HDUList(out_hdus)

            #out_calib_table = out_calib_table[np.sort(out_calib_table.colnames)]
            self.fine_calibration_coefs[pairnum] = full_fitting_dict[
                'calib coefs'].copy()

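            ## For every pair after the first, measure how far the new solution
            ## drifted from the previous pair's coefficients (presumably for
            ## diagnostics; devs is not used further in this method).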
            if pairnum > 0:
                devs = find_devs(initial_coef_table,
                                 full_fitting_dict['calib coefs'])

            initial_coef_table = Table(full_fitting_dict['calib coefs'].copy())

            self.final_calibrated_hdulists[pairnum] = hdulist
            self.filemanager.save_full_calib_dict(hdulist,
                                                  self.lampstr_f,
                                                  self.camera,
                                                  self.config,
                                                  filenum=filenum)

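            ## Encourage prompt release of the large per-pair tables before
            ## the next iteration.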
            gc.collect()
Beispiel #48
0
class TimeTable:

    masked = False

    def setup(self):

        # Initialize table
        self.table = Table(masked=self.masked)

        # Create column with mixed types
        np.random.seed(12345)
        self.table['i'] = np.arange(1000)
        self.table['a'] = np.random.random(1000)  # float
        self.table['b'] = np.random.random(1000) > 0.5  # bool
        self.table['c'] = np.random.random((1000, 10))  # 2d column
        self.table['d'] = np.random.choice(
            np.array(list(string.ascii_letters)), 1000)

        self.np_table = np.array(self.table)

        self.extra_row = {'a': 1.2, 'b': True, 'c': np.repeat(1, 10), 'd': 'Z'}

        self.extra_column = np.random.randint(0, 100, 1000)

        self.row_indices = np.where(self.table['a'] > 0.9)[0]

        self.table_grouped = self.table.group_by('d')

        # Another table for testing joining
        self.other_table = Table(masked=self.masked)
        self.other_table['i'] = np.arange(1, 1000, 3)
        self.other_table['f'] = np.random.random(len(self.other_table))  # one value per row; a scalar would broadcast and make the sort below a no-op
        self.other_table.sort('f')

        # Another table for testing hstack
        self.other_table_2 = Table(masked=self.masked)
        self.other_table_2['g'] = np.random.random(1000)
        self.other_table_2['h'] = np.random.random((1000, 10))

        self.bool_mask = self.table['a'] > 0.6

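    # The ``time_*`` methods below follow the naming convention used by
    # asv-style benchmark runners: each is timed independently, and the
    # per-benchmark attributes such as ``time_add_row.number = 1`` limit
    # how many times a state-mutating benchmark runs per repeat.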
    def time_table_slice_bool(self):
        table_subset = self.table[self.bool_mask]

    def time_table_slice_int(self):
        table_subset = self.table[self.row_indices]

    def time_column_slice_bool(self):
        col_subset = self.table['a'][self.bool_mask]

    def time_column_slice_int(self):
        col_subset = self.table['a'][self.row_indices]

    def time_column_get(self):
        self.table['c']

    def time_column_make_bool_mask(self):
        self.table['a'] > 0.6

    def time_multi_column_get(self):
        self.table[('a', 'c')]

    def time_column_set(self):
        self.table['a'] = 0.

    def time_column_set_all(self):
        self.table['b'][:] = True

    def time_column_set_row_subset(self):
        self.table['b'][self.bool_mask] = True

    def time_column_set_row_subset_int(self):
        self.table['b'][self.row_indices] = True

    def time_row_get(self):
        self.table[300]

    def time_iter_row(self):
        for row in self.table:
            pass

    def time_read_rows(self):
        for row in self.table:
            tuple(row)

    def time_item_get_rowfirst(self):
        self.table[300]['b']

    def time_item_get_colfirst(self):
        self.table['b'][300]

    def time_add_row(self):
        self.table.add_row(self.extra_row)

    time_add_row.number = 1
    time_add_row.repeat = 1

    def time_remove_row(self):
        self.table.remove_row(6)

    time_remove_row.number = 1
    time_remove_row.repeat = 1

    def time_remove_rows(self):
        self.table.remove_rows(self.row_indices)

    time_remove_rows.number = 1
    time_remove_rows.repeat = 1

    def time_add_column(self):
        self.table['e'] = self.extra_column

    time_add_column.number = 1
    time_add_column.repeat = 1

    def time_remove_column(self):
        self.table.remove_column('a')

    time_remove_column.number = 1
    time_remove_column.repeat = 1

    def time_init_from_np_array_no_copy(self):
        Table(self.np_table, copy=False)

    def time_init_from_np_array_copy(self):
        Table(self.np_table, copy=True)

    def time_copy_table(self):
        self.table.copy()

    def time_copy_column(self):
        self.table['a'].copy()

    def time_group(self):
        self.table.group_by('d')

    def time_aggregate(self):
        # Test aggregate with a function that supports reduceat
        self.table_grouped.groups.aggregate(np.sum)

    def time_aggregate_noreduceat(self):
        # Test aggregate with a function that doesn't support reduceat
        self.table_grouped.groups.aggregate(lambda x: np.sum(x))

    def time_sort(self):
        self.table.sort('a')

    def time_join_inner(self):
        join(self.table, self.other_table, keys="i", join_type='inner')

    def time_join_outer(self):
        join(self.table, self.other_table, keys="i", join_type='outer')

    def time_hstack(self):
        hstack([self.table, self.other_table_2])

    def time_vstack(self):
        vstack([self.table, self.table])
Beispiel #49
0
def main(args):
    print('\n')
    for cat_path in args.catalogues:
        print('reading %s..' % cat_path)
        cat = fits.open(cat_path)[1].data
        t = Table(cat)
        # establish zmax per detection band
        for mcol, mlim, kcorr in zip(args.magcols, args.maglims, args.kcorrs):
            print('\t"%s" at limit: %s' % (mcol, mlim))
            print('\treading "%s" k-corrections..' % kcorr)
            kcorr_list = pd.read_csv(kcorr, delimiter=' ')
            max_redshift = find_M_crossing(cat[mcol], mlim, kcorr_list)
            zmax_col = mcol + '_fl%.1f_zmax' % mlim
            t[zmax_col] = max_redshift
        t.write(cat_path, format='fits', overwrite=True)
        cat = t.copy()

        if args.randoms:
            print('\tbuilding clone randoms..')
            # drop points at random in a 1x1 square
            Nrand = len(cat) * args.Nrand
            ra = np.random.rand(Nrand)
            dec = np.random.rand(Nrand)
            random_cols = [fitscol(array=ra, name='ra', format='D'),
                           fitscol(array=dec, name='dec', format='D')]
            idcol = cat[args.idcol]
            zcol = np.asarray(cat[args.zcol])
            mask = (zcol > 0)

            # apply any weighting
            if args.zweight is None:
                wcol = None
                bins = 'auto'
            else:
                wcol = cat[args.zweight]
                bins = np.linspace(zcol[mask].min(), zcol[mask].max(), 40)

            # fit to n(z)
            # print('\t\tfitting redshift distribution..')
            # zg, Pz = fit_smail(zcol[mask], quiet=1, weights=wcol, bins=bins).T
            zg = Pz = None

            # construct redshift distribution per band
            print('\t\tcloning..')
            for mcol, mlim in zip(args.magcols, args.maglims):
                zmax_col = mcol + '_fl%.1f_zmax' % mlim
                maxcol = cosmo.comoving_distance(cat[zmax_col]).value
                randoms_id_z = clone_galaxies(idcol, maxcol, args.Nrand, zg=zg, Pz=Pz, zlims=args.zlims)
                zmin, zmax = 0., randoms_id_z[:, 1].max()
                zgrid = np.linspace(zmin, zmax, 100)
                dgrid = cosmo.comoving_distance(zgrid)
                randoms_comoving = interp1d_(randoms_id_z[:, 1], zgrid, dgrid)
                random_cols.append(fitscol(array=randoms_id_z[:, 0], name=mcol + '_cloneID', format='K'))
                random_cols.append(fitscol(array=randoms_id_z[:, 1], name=mcol + '_cloneZ', format='D'))
                random_cols.append(fitscol(array=randoms_comoving, name=mcol + '_cloneComovingDist', format='D'))

            # save randoms catalogue with galaxy IDs and redshifts for selection
            random_cols = fits.ColDefs(random_cols)
            rand_hdu = fits.BinTableHDU.from_columns(random_cols)
            if args.o is None:
                out = cat_path.replace('.fits', '_CloneZIDRandoms.fits')
            else:
                out = args.o
            rand_hdu.writeto(out, overwrite=True)

    print('done!')
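
# A minimal argparse driver consistent with the attributes main() reads above.
# The flag names, types, and defaults here are assumptions for illustration,
# not part of the original script.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='attach zmax columns and build clone randoms')
    parser.add_argument('catalogues', nargs='+', help='input FITS catalogue paths')
    parser.add_argument('--magcols', nargs='+', required=True, help='magnitude column per band')
    parser.add_argument('--maglims', nargs='+', type=float, required=True, help='magnitude limit per band')
    parser.add_argument('--kcorrs', nargs='+', required=True, help='k-correction CSV per band')
    parser.add_argument('--randoms', action='store_true', help='also build clone randoms')
    parser.add_argument('--Nrand', type=int, default=10, help='number of clones per galaxy')
    parser.add_argument('--idcol', default='ID', help='galaxy ID column name')
    parser.add_argument('--zcol', default='z', help='redshift column name')
    parser.add_argument('--zweight', default=None, help='optional weight column for n(z)')
    parser.add_argument('--zlims', nargs=2, type=float, default=None, help='optional (zmin, zmax) limits')
    parser.add_argument('-o', default=None, help='output path for the randoms catalogue')
    main(parser.parse_args())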