Example #1
File: catalog.py Project: fred3m/astropyp
def mask_catalog_columns(catalog, idx, columns=None, 
        catname=None, new_columns=None,
        catalog_kwargs=None):
    """
    Mask all of the rows in a table (or a subset of columns in a table)
    with a masked index and (optionally) rename the columns.
    
    Parameters
    ----------
    catalog: `~astropy.table.Table` or `~Catalog`
        Catalog or Table to be masked
    idx: `~numpy.ma.array`
        Masked array of indices to use for updating the catalog
    columns: list of strings, optional
        Columns to include in the masked table. If ``columns is None`` 
        (default) then all of the columns are included in the masked catalog.
    catname: str, optional
        Name of catalog. This is only necessary if you wish to rename the
        columns of the catalog for stacking later. See ``new_columns`` 
        for more.
    new_columns: list of strings, optional
        New names for the columns. This may be useful if you are combining 
        catalogs and want to standardize the column names. If
        ``new_columns is None`` (default) then if a ``catname`` is provided all
        of the columns are renamed to 'columnname_catname', otherwise 
        the original column names are used.
    catalog_kwargs: dict, optional
        If the result should be a `~astropyp.catalog.Catalog` then
        these are the kwargs to use when initializing the catalog
        (for example the names of the ra, dec, x, y columns).
        Otherwise an `~astropy.table.Table` is returned.
    
    Returns
    -------
    tbl: `~astropy.table.Table` or `~astropyp.catalog.Catalog`
        Catalog created by applying the masked index. If
        ``catalog_kwargs is None`` (default) a Table is returned;
        otherwise a Catalog is returned.
    """
    from astropy.table import Table
    from astropyp.utils import misc
    if isinstance(catalog, Catalog):
        tbl = catalog.sources
    else:
        tbl = catalog
    new_tbl = Table(masked=True)
    if columns is None:
        columns = tbl.columns.keys()
    if new_columns is None:
        if catname is None:
            new_columns = columns
        else:
            new_columns = ['{0}_{1}'.format(col,catname) for col in columns]
    for n, col in enumerate(columns):
        new_tbl[new_columns[n]] = misc.update_ma_idx(tbl[col], idx)
    if catalog_kwargs is not None:
        new_tbl = Catalog(new_tbl, **catalog_kwargs)
    return new_tbl
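
A minimal usage sketch (assuming mask_catalog_columns is importable from astropyp.catalog, as the file/project header suggests): a plain Table is masked with a masked index array and the columns are renamed with a catalog suffix.

import numpy as np
from astropy.table import Table
from astropyp.catalog import mask_catalog_columns  # assumed import path

tbl = Table({'ra': [10.1, 10.2, 10.3], 'dec': [-5.0, -5.1, -5.2]})
# Masked index: the second entry has no match, so that row comes out masked
idx = np.ma.array([0, 2, 1], mask=[False, True, False])
masked = mask_catalog_columns(tbl, idx, catname='ref')
# The result has columns 'ra_ref' and 'dec_ref'; its second row is masked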
Example #2
File: stack.py Project: fred3m/astropyp
 def merge_catalogs(self, good_indices=None, save_catalog=True):
     """
     Combine catalogs from each image into a master catalog that
     matches all of the sources (by default, although specifying
     ``good_indices`` allows merging only a subset of each
     catalog)
     
     Parameters
     ----------
     good_indices: string or list of arrays, optional
         If good_indices is a string, index ``ccd.indices[good_indices]``
         will be used for each CCD to filter the sources. Otherwise
         good_indices should be a list of indices to use for each CCD.
         If no good_indices are specified then all sources will be
         merged
     
     save_catalog: bool
         Whether or not to save the catalog
     
     Returns
     -------
     catalog: `~astropyp.catalog.Catalog`
         Catalog of merged positions and indices for the sources
         in each image.
     """
     import astropyp.catalog
     from astropyp.utils.misc import update_ma_idx
     from astropy import table
     from astropy.extern import six
     import numpy as np
     
     if good_indices is None:
         all_ra = [ccd.catalog.ra for ccd in self.ccds]
         all_dec = [ccd.catalog.dec for ccd in self.ccds]
     else:
         if isinstance(good_indices, six.string_types):
             good_indices = [ccd.indices[good_indices] for ccd in self.ccds]
         all_ra = [ccd.catalog.ra[good_indices[n]] 
             for n, ccd in enumerate(self.ccds)]
         all_dec = [ccd.catalog.dec[good_indices[n]] 
             for n, ccd in enumerate(self.ccds)]
     indices, matched = astropyp.catalog.get_merged_indices(all_ra, all_dec)
     all_x = []
     all_y = []
     for n,ccd in enumerate(self.ccds):
         ccd.indices['merge'] = indices[n]
         if n!=self.ref_index:
             x0,y0 = self.tx_solutions[
                 (n,self.ref_index)].transform_coords(ccd.catalog)
         else:
             x0,y0 = (ccd.catalog.x, ccd.catalog.y)
         if good_indices is not None:
             x0 = x0[good_indices[n]]
             y0 = y0[good_indices[n]]
         all_x.append(x0)
         all_y.append(y0)
     all_x = [update_ma_idx(all_x[n], self.ccds[n].indices['merge'])
         for n in self.ccd_indices]
     all_y = [update_ma_idx(all_y[n], self.ccds[n].indices['merge'])
         for n in self.ccd_indices]
     sources = table.Table(masked=True)
     sources['x'],sources['y'] = astropyp.catalog.combine_coordinates(
         all_x, all_y, 'mean')
     for n,ccd in enumerate(self.ccds):
         x_col = 'x_{0}'.format(n)
         y_col = 'y_{0}'.format(n)
         sources[x_col] = all_x[n]
         sources[y_col] = all_y[n]
         if good_indices is None:
             src_idx = self.ccds[n].catalog['src_idx']
         else:
             src_idx = self.ccds[n].catalog['src_idx'][good_indices[n]]
         src_idx = update_ma_idx(src_idx, 
             self.ccds[n].indices['merge'])
         sources['idx_{0}'.format(n)] = src_idx
         x_diff = sources[x_col]-sources['x']
         y_diff = sources[y_col]-sources['y']
         logger.info('x rms: {0}'.format(
             np.sqrt(np.sum(x_diff**2)/len(sources))))
         logger.info('y rms: {0}'.format(
             np.sqrt(np.sum(y_diff**2)/len(sources))))
     catalog = astropyp.catalog.Catalog(sources)
     if save_catalog:
         self.merged_catalog = catalog
     return catalog
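
A hedged call sketch: stack stands for an instance of the (unnamed) class in stack.py that owns merge_catalogs, with its ccds, indices, and tx_solutions attributes already populated; the saved index name 'good' is hypothetical.

# Sketch only: assumes `stack` has been fully set up (CCDs loaded,
# source catalogs built, and transform solutions computed)
merged = stack.merge_catalogs(good_indices='good', save_catalog=True)
print(merged.sources['x', 'y'])  # mean merged positions of the sources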
Example #3
 def perform_psf_photometry(self, separation=None, 
         verbose=False, fit_position=True, pos_range=0, indices=None,
         kd_tree=None, exptime=None, pool_size=None, stack_method='mean',
         save_catalog=True, single_thread=False):
     """
     Perform PSF photometry on all of the sources in the catalog,
     or, if ``indices`` is specified, on a subset of sources.
 
     Parameters
     ----------
     separation: float, optional
         Separation (in pixels) for members to be considered
         part of the same group. *Default=1.5 times the psf width*
     verbose: bool, optional
         Whether or not to show info about the fit progress.
         *Default=False*
     fit_position: bool, optional
         Whether or not to fit the position along with the
         amplitude of each source. *Default=True*
     pos_range: int, optional
         Maximum distance (in image pixels) that
         a source's position can be changed. If ``pos_range=0``
         no bounds will be set. *Default=0*
     indices: `~numpy.ndarray` or string, optional
         Indices of the sources on which to perform PSF photometry.
         It is often advantageous to remove sources with
         bad pixels and sublinear flux to save processing time.
         All sources not included in indices will have their
         psf flux set to NaN. This can either be an array of
         indices for the positions in self.catalog or 
         the name of a saved index in self.indices
     """
     import astropyp.catalog
     from astropyp.phot.psf import SinglePSF
     import multiprocessing
     import numpy as np
     
     # If no exposure time was passed to the stack and all of the
     # ccds had the same exposure time, use that to calculate the
     # psf magnitude
     if exptime is None:
         if hasattr(self, 'exptime'):
             exptime = self.exptime
         else:
             same_exptime = True
             exptime = np.nan
             for ccd in self.ccds:
                 if np.isnan(exptime):
                     exptime = ccd.exptime
                 elif ccd.exptime != exptime:
                     same_exptime = False
             if not same_exptime:
                 exptime = None
     
     if hasattr(self.catalog, 'peak'):
         peak = self.catalog.peak
     else:
         from astropyp.utils.misc import update_ma_idx
         peak = [
             update_ma_idx(self.ccds[n].catalog['peak'],
             self.catalog['idx_'+str(n)]) 
                 for n in self.ccd_indices]
         peak = np.ma.mean(peak, axis=0)
     
     # Get the positions and estimated amplitudes of
     # the sources to fit
     
     # Materialize the zips so they support len() and indexing
     # (zip returns an iterator on Python 3)
     if indices is not None:
         positions = list(zip(self.catalog.x[indices],
                              self.catalog.y[indices]))
         amplitudes = peak[indices]
     else:
         positions = list(zip(self.catalog.x, self.catalog.y))
         amplitudes = peak
 
     src_count = len(positions)
 
     src_indices = np.arange(0,len(self.catalog.sources),1)
     all_positions = np.array(list(zip(self.catalog.x, self.catalog.y)))
     all_amplitudes = peak
     total_sources = len(all_amplitudes)
     src_psfs = []
 
     psf_flux = np.ones((total_sources,))*np.nan
     psf_flux_err = np.ones((total_sources,))*np.nan
     psf_x = np.ones((total_sources,))*np.nan
     psf_y = np.ones((total_sources,))*np.nan
     new_amplitudes = np.ones((total_sources,))*np.nan
 
     # Find nearest neighbors to avoid contamination by nearby sources
     if separation is not None:
         if not hasattr(self, 'kd_tree'):
             from scipy import spatial
             KDTree = spatial.cKDTree
             self.kd_tree = KDTree(all_positions)
         idx, nidx = astropyp.catalog.find_neighbors(separation, 
             kd_tree=self.kd_tree)
     # Get parameters to pass to the pool manager
     imgs = []
     dqmasks = []
     tx_solutions = []
     for n,ccd in enumerate(self.ccds):
         imgs.append(ccd.img)
         dqmasks.append(ccd.dqmask)
         if n!= self.ref_index:
             tx_solutions.append(self.tx_solutions[(self.ref_index, n)])
         else:
             tx_solutions.append(None)
     
     if pool_size is None:
         pool_size = multiprocessing.cpu_count()
     # Create a pool with the static (for all sources)
     # variables to speed up processing
     pool = multiprocessing.Pool(
         processes=pool_size,
         initializer=_init_multiprocess,
         initargs=(imgs, dqmasks, tx_solutions, self.psf.shape,
             self.ref_index, stack_method, self.subsampling, 
             self.psf))
     pool_args = []
     # Fit each source to the PSF, calculate its flux, and its new
     # position
     for n in range(len(positions)):
         if indices is not None:
             src_idx = src_indices[indices][n]
         else:
             src_idx = src_indices[n]
         if separation is not None:
             n_indices = nidx[idx==src_idx]
             neighbor_positions = all_positions[n_indices]
             neighbor_amplitudes = all_amplitudes[n_indices]
         else:
             neighbor_positions = []
             neighbor_amplitudes = []
         pool_args.append((amplitudes[n], 
             (positions[n][1], positions[n][0]),
             self.subsampling,
             neighbor_positions,
             neighbor_amplitudes))
 
     result = pool.map(_stack_psf_worker, pool_args)
     pool.close()
     pool.join()
 
     # Update the various psf array parameters
     for n in range(len(positions)):
         if indices is not None:
             src_idx = src_indices[indices][n]
         else:
             src_idx = src_indices[n]
         psf_flux[src_idx] = result[n][0]
         psf_flux_err[src_idx] = result[n][1]
         new_amplitudes[src_idx] = result[n][2]
         psf_x[src_idx], psf_y[src_idx] = result[n][3]
 
     # Save the psf derived quantities in the catalog
     # Ignore divide by zero errors that occur when sources
     # have zero psf flux (i.e. bad sources)
     if save_catalog:
         np_err = np.geterr()
         np.seterr(divide='ignore')
         self.catalog.sources['psf_flux'] = psf_flux
         self.catalog.sources['psf_flux_err'] = psf_flux_err
         if exptime is not None:
             psf_mag = -2.5*np.log10(psf_flux/exptime)
             self.catalog.sources['psf_mag'] = psf_mag
             self.catalog.sources['psf_mag_err'] = psf_flux_err/psf_flux
         self.catalog.sources['psf_x'] = psf_x
         self.catalog.sources['psf_y'] = psf_y
         np.seterr(**np_err)
     
     return psf_flux, psf_flux_err
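
A call sketch under the same assumptions (stack is a prepared instance with psf, catalog, ccds, and tx_solutions in place). A boolean index array is used rather than a saved index name, since the excerpt indexes self.catalog.x[indices] directly.

import numpy as np

# Fit every source; in practice sources with bad pixels or sublinear
# flux would be filtered out first to save processing time
good = np.ones((len(stack.catalog.sources),), dtype=bool)
psf_flux, psf_flux_err = stack.perform_psf_photometry(
    separation=10.0,  # pixels; neighbors within this radius are grouped
    indices=good,
    pool_size=4)      # cap the size of the multiprocessing pool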
Example #4
File: catalog.py Project: fred3m/astropyp
def get_all_merged_indices(all_coord1, all_coord2, pool_size=None, 
        separation=1/3600., merge_type='outer'):
    """
    Get masked indices for a set of ra,dec coordinates that merge the
    sources together (an outer join by default)
    
    Parameters
    ----------
    all_coord1: list of array-like
        List of arrays of values for the first coordinate (usually RA or X)
    all_coord2: list of array-like
        List of arrays of values for the second coordinate (usually DEC or Y)
    pool_size: int, optional
        Number of processors to use to match coordinates. If 
        ``pool_size is None`` (default) then the maximum number
        of processors is used.
    separation: float, optional
        Maximum distance between two coordinates for a match. The default
        is ``1/3600``, or 1 arcsec.
    merge_type: str, optional
        Type of merge to use. This must be 'outer','inner', 'left', or 'right'.
        The default is 'outer'.
    
    Returns
    -------
    indices: list of masked arrays
        Indices to match each catalog in all_coord1, all_coord2 to the
        master catalog
    matched: array
        Indices of rows that have an entry for *every* set of coordinates
    all_duplicates: array
        Indices of rows that have duplicate values
    mean_coord1: array
        Average coord1 for each row
    mean_coord2: array
        Average coord2 for each row
    """
    import numpy as np
    from astropyp.utils import misc
    
    if merge_type not in ['outer','inner','left','right']:
        raise ValueError(
            "merge_type must be 'outer','inner', 'left', or 'right'")
    
    # Initialize indices and coordinates
    indices = [np.ma.array(np.arange(all_coord1[m].shape[0]), dtype=int)
        for m in range(len(all_coord1))]
    mean_coord1 = np.ma.array(all_coord1[0])
    mean_coord2 = np.ma.array(all_coord2[0])
    all_duplicates = np.zeros(mean_coord1.shape, dtype=bool)
    
    # Create merged indices
    for n in range(1,len(all_coord1)):
        idx0, idx1, duplicates = get_merged_indices(
            (all_coord1[n],all_coord2[n]), 
            ref_coords=(mean_coord1,mean_coord2), 
            pool_size=pool_size, separation=separation)
        new_idx, new_unmatched = idx0
        ref_idx, ref_unmatched = idx1
        
        # Update list of duplicates
        duplicates_unmatched = all_duplicates[ref_unmatched]
        all_duplicates = all_duplicates[ref_idx]
        all_duplicates[duplicates] = True
        
        # Update indices
        if merge_type=='outer' or merge_type=='left':
            ref_idx = np.hstack([ref_idx, ref_unmatched])
            new_idx = np.hstack([new_idx, 
                -np.ones(ref_unmatched.shape, dtype=int)])
            all_duplicates = np.hstack([all_duplicates, duplicates_unmatched])
        if merge_type=='outer' or merge_type=='right':
            ref_idx = np.hstack([ref_idx, 
                -np.ones(new_unmatched.shape, dtype=int)])
            new_idx = np.hstack([new_idx, new_unmatched])
            all_duplicates = np.hstack([all_duplicates, 
                np.zeros(new_unmatched.shape, dtype=bool)])
        # Mask indices
        ref_mask = ref_idx<0
        new_mask = new_idx<0
        ref_idx = np.ma.array(ref_idx, mask=ref_mask)
        new_idx = np.ma.array(new_idx, mask=new_mask)
        
        # Update the mean coordinate values
        mean_coord1 = misc.update_ma_idx(mean_coord1,ref_idx)
        mean_coord2 = misc.update_ma_idx(mean_coord2,ref_idx)
        new_coord1 = misc.update_ma_idx(all_coord1[n],new_idx)
        new_coord2 = misc.update_ma_idx(all_coord2[n],new_idx)
        mean_coord1 = np.ma.mean(
            np.ma.vstack([mean_coord1, new_coord1]), axis=0)
        mean_coord2 = np.ma.mean(
            np.ma.vstack([mean_coord2, new_coord2]), axis=0)
        # Update all of the indices with the new matches
        for m in range(n):
            indices[m] = misc.update_ma_idx(indices[m],ref_idx)
        indices[n] = new_idx
        
    matched = np.sum([i.mask for i in indices],axis=0)==0
    return indices, matched, all_duplicates, mean_coord1, mean_coord2
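
A small end-to-end sketch of get_all_merged_indices (assuming it is importable from astropyp.catalog): two toy catalogs share one source within the default 1 arcsec separation, so the outer join yields three merged rows, one of which is matched in both catalogs.

import numpy as np
from astropyp.catalog import get_all_merged_indices  # assumed import path

# Two toy catalogs; the second source of the first catalog and the
# first source of the second catalog are the same object
ra1 = np.array([150.000, 150.010])
dec1 = np.array([2.000, 2.010])
ra2 = np.array([150.010, 150.500])
dec2 = np.array([2.010, 2.500])

indices, matched, duplicates, mean_ra, mean_dec = get_all_merged_indices(
    [ra1, ra2], [dec1, dec2], separation=1/3600.)
# indices[0] and indices[1] map each input catalog onto the merged rows;
# matched is True only for the row that appears in both catalogs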