)  # New bootes cat, everything

# Comparing the catalog to Mark's photoz

c = SkyCoord(ra=mpz['RA'] * u.degree, dec=mpz['DEC'] * u.degree)  # Mark's Cat
catalog = SkyCoord(ra=boot_gal['ra'] * u.degree,
                   dec=boot_gal['dec'] * u.degree)  # gal_cat
idx, d2d, d3d = c.match_to_catalog_sky(catalog)

# idx are indices into 'catalog' giving the closest match to each of the
# coordinates in c, and d2d is the on-sky distance between them.

new = boot_gal[idx]  # same length as mpz: closest match in boot_gal for each entry in mpz
new = new.filled(fill_value=-99.0)
new.add_column(Column(d2d.arcsec, name='sep'))
# I have checked the histogram of the sep values: all are < 0.025 arcsec,
# which means every object in Mark's catalog is found in the merged catalog.
# Mark's catalog is basically a subset of the merged catalog with
# SNR in IRAC/ch2 >= 5.0.
new = new[new['sep'] <= 1.0]
new = hstack([new, mpz['Z', 'TYPE', 'PZ']])  # table with photometry and Mark's photo-z
new.write(
    '/Users/rscm9/Research/repository/CADS/candidacy/catalogs/v2/'
    '0_bootes_catalog_mark_pz_ch2_5sigma.fits',
    format='fits',
    overwrite=True)
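# A quick sketch of the separation-histogram check mentioned above (an
# assumption: matplotlib is available; run it before the 1.0" cut to see the
# full distribution):
import matplotlib.pyplot as plt

plt.hist(new['sep'], bins=50)
plt.xlabel('separation [arcsec]')
plt.show()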

#%%
""" Constructing a High-z Catalog """
Example #2
def _setup(self, table_type):
    self.data = OrderedDict([('a', Column(name='x', data=[1, 3])),
                             ('b', [2, 4]),
                             ('c', np.array([3, 5], dtype='i8'))])
Example #3
def phot_calib(candidates_list, telescope, catalog='II/349/ps1', radius=3, doPlot=True):
    """Perform photometric calibration using catalogs"""

    delta_mag_median_list = []
    delta_mag_std_list = []
    filename_list = []

    # Get sources
    for i, key in enumerate(candidates_list.group_by('filenames').groups.keys):
        print('Processing photometric calibration for', key[0])

        # Get pixel scale in degrees (keyword depends on the reduction software)
        header = fits.getheader(key[0])
        pixScale = None
        for kw in ('CDELT1', '_DELT1', 'CD1_1'):
            if kw in header:
                pixScale = abs(header[kw])
                break
        if pixScale is None:
            raise KeyError('Pixel scale could not be found in fits header.\n'
                           'Expected keyword: CDELT1, _DELT1 or CD1_1')


        # Get filter
        #band_DB, band_cat = get_filter(key[0], telescope)
        band_DB = 'Clear'
        band_cat = 'g+r'

        detected_sources = ascii.read(key[0]+'.magwcs', names=['Xpos','Ypos','_RAJ2000','_DEJ2000', 'mag_inst', 'mag_inst_err', 'filenames'])
        # Add units
        detected_sources['_RAJ2000'] *= u.deg
        detected_sources['_DEJ2000'] *= u.deg
        # Add index for each source
        detected_sources['idx'] = np.arange(len(detected_sources))

        crossmatch = xmatch(detected_sources, catalog, radius*pixScale*3600)
        # Initialise flag array. 0: unknown sources / 1: known sources
        flag = np.zeros(len(crossmatch))
        # Do not consider duplicates
        referenced_star_idx = np.unique(crossmatch['idx'])

        crossmatch['id'] = np.arange(len(crossmatch))
        closest_id = []
        for idx in referenced_star_idx:
            mask = crossmatch['idx'] == idx
            closest_id.append(crossmatch[mask]['id'][0])
        
        # Set flag entries to 1 for detected sources associated with a star
        flag[closest_id] = 1


        ref_sources = crossmatch[flag == 1]

        bands = band_cat.split('+')
        # Bunch of conditions to filter the catalog data
        mask = ref_sources['Nd'] > 15
        ref_sources = ref_sources[mask]
        for band in bands:
            mask = ref_sources['%sFlags' % band] == 115000
            ref_sources = ref_sources[mask]

        #mask = ((ref_sources['Nd'] > 15) & (ref_sources['rFlags'] == 115000) & (ref_sources['gFlags'] == 115000))

        # Combine filter bands if needed: AB magnitudes map to fluxes via
        # f = 3631 Jy * 10**(-0.4*m), so the combined magnitude is
        # -2.5*log10(sum_b 10**(-0.4*m_b)).
        if len(bands) > 1:
            jansky = np.zeros(len(ref_sources))
            for band in bands:
                jansky = jansky + 3631 * 10**(-0.4 * ref_sources['%smag' % band])
            newmag = -2.5 * np.log10(jansky / 3631)
        else:
            newmag = ref_sources['%smag' % bands[0]]


        # Iteratively clip objects until all remaining delta_mag values are within 1 sigma of the median
        delta_mag = ref_sources['mag_inst'] - newmag
        delta_mag_median = np.median(delta_mag)
        delta_mag_std = np.std(delta_mag)
        counter = 1
        while (np.max(abs(delta_mag)) > abs(delta_mag_median) + delta_mag_std) and (counter <= 5):
            
            #print (len(delta_mag), delta_mag_median, delta_mag_std)
            mask = abs(delta_mag) < abs(delta_mag_median) + delta_mag_std
            ref_sources = ref_sources[mask]
            newmag = newmag[mask]
             
            delta_mag = ref_sources['mag_inst'] - newmag
            delta_mag_median = np.median(delta_mag)
            delta_mag_std = np.std(delta_mag)
            counter += 1

        #delta_mag_std = np.std(delta_mag)
        ref_sources.write('ZP_%d.dat' % i, format='ascii.commented_header', overwrite=True)
        if doPlot:
            ref_sources.show_in_browser(jsviewer=True)
            plt.scatter(ref_sources['mag_inst'], ref_sources['rmag'], color ='blue')
            plt.scatter(ref_sources['mag_inst'], newmag, color='red')
            plt.plot(ref_sources['mag_inst'], ref_sources['mag_inst']-delta_mag_median, color='green')
            plt.savefig('ZP_%d.png' % i)
            plt.show()

        delta_mag_median_list.append(delta_mag_median)
        delta_mag_std_list.append(delta_mag_std)
        filename_list.append(key[0])

    
    # Apply photometric calibration to candidates
    mag_calib_col = Column(np.zeros(len(candidates_list)), name='mag_calib')
    mag_calib_err_col = Column(np.zeros(len(candidates_list)), name='mag_calib_err')
    magsys_col = Column(['None']*len(candidates_list), name='magsys')
    filter_cat_col = Column(['None']*len(candidates_list), name='filter_cat')
    filter_DB_col = Column(['NoFilterFound']*len(candidates_list), name='filter_DB')

    candidates_list.add_columns([mag_calib_col, mag_calib_err_col, magsys_col, filter_cat_col, filter_DB_col])

    for j, filename in enumerate(filename_list):
        mask = candidates_list['filenames'] == filename
        candidates_list['mag_calib'][mask] = (candidates_list['mag_inst'][mask]
                                              - delta_mag_median_list[j])
        # Quadratic sum of statistical and calibration errors.
        candidates_list['mag_calib_err'][mask] = np.sqrt(
            candidates_list['mag_inst_err'][mask]**2 + delta_mag_std_list[j]**2)

        # Define magnitude system
        if catalog == 'II/349/ps1':
            candidates_list['magsys'][mask] = 'AB'
        else:
            pass

        candidates_list['filter_cat'][mask] = band_cat
        candidates_list['filter_DB'][mask] = band_DB


    candidates_list.write('tot_cand2.dat', format='ascii.commented_header', overwrite=True)


    return candidates_list
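# Hypothetical call sketch (assumptions: `cands` is an astropy Table with
# 'filenames', 'mag_inst' and 'mag_inst_err' columns, and each image has a
# matching '<image>.magwcs' source list on disk):
# cands = phot_calib(cands, 'my-telescope', catalog='II/349/ps1', radius=3,
#                    doPlot=False)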
Example #4
def setup_method(self, table_type):
    self._setup(table_type)
    self.data = [(np.int32(1), np.int32(3)),
                 Column(name='col1', data=[2, 4], dtype=np.int32),
                 np.array([3, 5], dtype=np.int32)]
Example #5
def _setup(self, table_type):
    self.data = dict([('a', Column([1, 3], name='x')),
                      ('b', [2, 4]),
                      ('c', np.array([3, 5], dtype='i8'))])
Example #6
def daofind(data,
            threshold,
            fwhm,
            ratio=1.0,
            theta=0.0,
            sigma_radius=1.5,
            sharplo=0.2,
            sharphi=1.0,
            roundlo=-1.0,
            roundhi=1.0,
            sky=0.0,
            exclude_border=False):
    """
    Detect stars in an image using the DAOFIND algorithm.

    `DAOFIND`_ (`Stetson 1987; PASP 99, 191
    <http://adsabs.harvard.edu/abs/1987PASP...99..191S>`_) searches
    images for local density maxima that have a peak amplitude greater
    than ``threshold`` (approximately; ``threshold`` is applied to a
    convolved image) and have a size and shape similar to the defined 2D
    Gaussian kernel.  The Gaussian kernel is defined by the ``fwhm``,
    ``ratio``, ``theta``, and ``sigma_radius`` input parameters.

    .. _DAOFIND: http://iraf.net/irafhelp.php?val=daofind&help=Help+Page

    ``daofind`` finds the object centroid by fitting the marginal x
    and y 1D distributions of the Gaussian kernel to the marginal x and
    y distributions of the input (unconvolved) ``data`` image.

    ``daofind`` calculates the object roundness using two methods.  The
    ``roundlo`` and ``roundhi`` bounds are applied to both measures of
    roundness.  The first method (``roundness1``; called ``SROUND`` in
    `DAOFIND`_) is based on the source symmetry and is the ratio of a
    measure of the object's bilateral (2-fold) to four-fold symmetry.
    The second roundness statistic (``roundness2``; called ``GROUND`` in
    `DAOFIND`_) measures the ratio of the difference in the height of
    the best fitting Gaussian function in x minus the best fitting
    Gaussian function in y, divided by the average of the best fitting
    Gaussian functions in x and y.  A circular source will have a zero
    roundness.  A source extended in x or y will have a negative or
    positive roundness, respectively.

    The sharpness statistic measures the ratio of the difference between
    the height of the central pixel and the mean of the surrounding
    non-bad pixels in the convolved image, to the height of the best
    fitting Gaussian function at that point.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    threshold : float
        The absolute image value above which to select sources.

    fwhm : float
        The full-width half-maximum (FWHM) of the major axis of the
        Gaussian kernel in units of pixels.

    ratio : float, optional
        The ratio of the minor to major axis standard deviations of the
        Gaussian kernel.  ``ratio`` must be strictly positive and less
        than or equal to 1.0.  The default is 1.0 (i.e., a circular
        Gaussian kernel).

    theta : float, optional
        The position angle (in degrees) of the major axis of the
        Gaussian kernel measured counter-clockwise from the positive x
        axis.

    sigma_radius : float, optional
        The truncation radius of the Gaussian kernel in units of sigma
        (standard deviation) [``1 sigma = FWHM /
        (2.0*sqrt(2.0*log(2.0)))``].

    sharplo : float, optional
        The lower bound on sharpness for object detection.

    sharphi : float, optional
        The upper bound on sharpness for object detection.

    roundlo : float, optional
        The lower bound on roundness for object detection.

    roundhi : float, optional
        The upper bound on roundness for object detection.

    sky : float, optional
        The background sky level of the image.  Setting ``sky`` affects
        only the output values of the object ``peak``, ``flux``, and
        ``mag``.  The default is 0.0, which should be used to
        replicate the results from `DAOFIND`_.

    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders.  The default is
        `False`, which is the mode used by `DAOFIND`_.

    Returns
    -------
    table : `~astropy.table.Table`

        A table of found objects with the following parameters:

        * ``id``: unique object identification number.
        * ``xcentroid, ycentroid``: object centroid.
        * ``sharpness``: object sharpness.
        * ``roundness1``: object roundness based on symmetry.
        * ``roundness2``: object roundness based on marginal Gaussian
          fits.
        * ``npix``: number of pixels in the Gaussian kernel.
        * ``sky``: the input ``sky`` parameter.
        * ``peak``: the peak, sky-subtracted, pixel value of the object.
        * ``flux``: the object flux calculated as the peak density in
          the convolved image divided by the detection threshold.  This
          derivation matches that of `DAOFIND`_ if ``sky`` is 0.0.
        * ``mag``: the object instrumental magnitude calculated as
          ``-2.5 * log10(flux)``.  The derivation matches that of
          `DAOFIND`_ if ``sky`` is 0.0.

    See Also
    --------
    irafstarfind

    Notes
    -----
    For the convolution step, this routine sets pixels beyond the image
    borders to 0.0.  The equivalent parameters in `DAOFIND`_ are
    ``boundary='constant'`` and ``constant=0.0``.

    References
    ----------

    .. [1] Stetson, P. 1987; PASP 99, 191 (http://adsabs.harvard.edu/abs/1987PASP...99..191S)
    .. [2] http://iraf.net/irafhelp.php?val=daofind&help=Help+Page
    .. [3] http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?daofind
    """

    daofind_kernel = _FindObjKernel(fwhm, ratio, theta, sigma_radius)
    threshold *= daofind_kernel.relerr
    objs = _findobjs(data,
                     threshold,
                     daofind_kernel,
                     exclude_border=exclude_border)
    tbl = _daofind_properties(objs, threshold, daofind_kernel, sky)
    if len(objs) == 0:
        warnings.warn('No sources were found.', AstropyUserWarning)
        return tbl  # empty table
    table_mask = ((tbl['sharpness'] > sharplo) & (tbl['sharpness'] < sharphi) &
                  (tbl['roundness1'] > roundlo) & (tbl['roundness1'] < roundhi)
                  & (tbl['roundness2'] > roundlo) &
                  (tbl['roundness2'] < roundhi))
    tbl = tbl[table_mask]
    idcol = Column(name='id', data=np.arange(len(tbl)) + 1)
    tbl.add_column(idcol, 0)
    if len(tbl) == 0:
        warnings.warn(
            'Sources were found, but none pass the sharpness and '
            'roundness criteria.', AstropyUserWarning)
    return tbl
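# A minimal usage sketch (an assumption, not part of the original module):
# inject one Gaussian star of known FWHM into background noise and recover it.
import numpy as np

fwhm = 3.0
sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
yy, xx = np.mgrid[0:101, 0:101]
data = np.random.normal(0.0, 1.0, (101, 101))
data += 50.0 * np.exp(-((xx - 50.0)**2 + (yy - 50.0)**2) / (2.0 * sigma**2))
found = daofind(data, threshold=8.0, fwhm=fwhm)
print(found['xcentroid', 'ycentroid', 'peak'])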
Example #7
def go(fCat='GaiaCatalog0.ASC', \
       fHeader='V404_Cyg_adOFF-012_R_120sTest_MAPPED.fit', \
       colBlobLength='A_IMAGE', blobLenDefault=5.):
    
    """Plotting the Co-ordinates in a Click-Animated Sequence"""
    
   
    #Objects with Flux > 5000
    # tDUM = Table.read('GaiaCatalog0.ASC', format='ascii.sextractor')
    tDUM = Table.read(fCat, format='ascii.sextractor')
   
    
    # Let's set a conditional on whether our blob length column is present
    if colBlobLength in tDUM.colnames:
        vBlobLength = tDUM[colBlobLength]
    else:
        print("programday_1 INFO - column %s not found. Generating a dummy length" % (colBlobLength))
        # let's add our dummy length column to the table.
        colBlobLength = "%s_GEN" % (colBlobLength)
        tDUM[colBlobLength] = np.repeat(blobLenDefault, len(tDUM))

    # return

    #finding time from the fits header
    #hdul = fits.open('V404_Cyg_adOFF-012_R_120sTest_MAPPED.fit')
    ##hdul.info()
    #myHeader = fits.getheader('V404_Cyg_adOFF-012_R_120sTest_MAPPED.fit')
    myHeader = fits.getheader(fHeader)
    time=myHeader['DATE-OBS']
    #print(time)

    # Let's promote the world coordinate system parsing out to here,
    # since we're going to need it whatever we do
    wcs = WCS(myHeader)


    #Define the good data
    xCut = 5000
    bGood = tDUM['FLUX_ISO'] > xCut
    # let's get the image dimensions from the header for our frame
    # boundary rectangle:
    nX = myHeader['NAXIS1']
    nY = myHeader['NAXIS2']

    boundsX = np.asarray([0., nX, nX, 0.], 'float')
    boundsY = np.asarray([0., 0., nY, nY], 'float')

    boundsX = np.hstack((boundsX, boundsX[0]))
    boundsY = np.hstack((boundsY, boundsY[0]))

    # print(boundsX)
    # print(boundsY)

    # Now that we've read in the header for the image, use it to convert
    # pixel coords to sky coords if they didn't come in with the catalog.
    # Reference: https://docs.astropy.org/en/stable/wcs/

    fig = plt.figure()
    fig.clf()
    fig.suptitle('Object Locations in Azimuth and Altitude')
    ax6 = fig.add_subplot(111)
    ax6.set_xlabel('Azimuth')
    ax6.set_ylabel('Altitude')
    UMD_Observatory = EarthLocation(lat=41.32 * u.deg, lon=-83.24 * u.deg)
    # GEOMETRY OF THE END POINTS OF EACH ELLIPSE
    # (boolean-index the good rows; SExtractor's THETA_IMAGE is in degrees)
    Len2 = np.array(tDUM['A_IMAGE'][bGood]) / 2.0
    Thet = np.radians(np.array(tDUM['THETA_IMAGE'][bGood]))
    cosT = np.cos(Thet)
    sinT = np.sin(Thet)
    AS = Len2 * sinT
    AC = Len2 * cosT
    CX = np.array(tDUM['X_IMAGE'][bGood])
    CY = np.array(tDUM['Y_IMAGE'][bGood])
    
    # PLOTTING ENDPOINTS
    EX1 = CX + AC
    EY1 = CY + AS
    EX2 = CX - AC
    EY2 = CY - AS
    # ENDPOINT 1 in ALTAZ (build the SkyCoords directly: the good-row subset
    # is shorter than tDUM, so it cannot be stored back as table columns)
    RA1, DEC1 = wcs.all_pix2world(EX1, EY1, 0)
    objPos1 = SkyCoord(ra=RA1 * u.deg, dec=DEC1 * u.deg, frame='fk5')
    MappedAltAz1 = objPos1.transform_to(AltAz(obstime=time, location=UMD_Observatory))
    myAz1 = np.asarray(MappedAltAz1.az)
    myAlt1 = np.asarray(MappedAltAz1.alt)
    # ENDPOINT 2 in ALTAZ
    RA2, DEC2 = wcs.all_pix2world(EX2, EY2, 0)
    objPos2 = SkyCoord(ra=RA2 * u.deg, dec=DEC2 * u.deg, frame='fk5')
    MappedAltAz2 = objPos2.transform_to(AltAz(obstime=time, location=UMD_Observatory))
    myAz2 = np.asarray(MappedAltAz2.az)
    myAlt2 = np.asarray(MappedAltAz2.alt)

    dum6 = ax6.scatter(myAz1,myAlt1,marker='.')
    dum66 = ax6.scatter(myAz2,myAlt2,marker='.')

    # Testing connectors: draw a line segment joining the two endpoints of
    # each blob.
    for az1, alt1, az2, alt2 in zip(myAz1, myAlt1, myAz2, myAlt2):
        ax6.plot([az1, az2], [alt1, alt2], 'k-')
    plt.show()
Example #8
    simulations = Table.read(path)
    separations = simulations['separations_kpc']
    separations.sort()
    center_to_BCG[i] = np.median(separations)

    #print(cluster, len(separations))

    up_errors[i], lo_errors[i] = cs.find_error(separations,
                                               cluster_info['Z'][i])

    i += 1

#make a final table
print("Writing results_" + sys.argv[1] + ".fits ...")
n = Column(name, name="catalog_number", dtype=np.int32)
o = Column(center_to_BCG, name="BCG_offset_kpc")
u = Column(up_errors, name="Upper_Error_kpc")
l = Column(lo_errors, name="Lower_Error_kpc")
z = cluster_info['Z']
results = Table([n, o, u, l, z])
result_table_path = "../results/results_" + sys.argv[1] + ".fits"
if os.path.exists(result_table_path):
    os.remove(result_table_path)
results.write(result_table_path)

#plot results
print("Plotting the results to result_" + sys.argv[1] + '.png')
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib.colors import LogNorm
Example #9
def hdf5_adddata(hdf,
                 sname,
                 meta,
                 debug=False,
                 chk_meta_only=False,
                 mk_test_file=False):
    """ Append COS-Dwarfs data to the h5 file

    Parameters
    ----------
    hdf : hdf5 pointer
    IDs : ndarray
      int array of IGM_ID values in mainDB
    sname : str
      Survey name
    chk_meta_only : bool, optional
      Only check meta file;  will not write
    mk_test_file : bool, optional
      Generate the debug test file for Travis??

    Returns
    -------

    """
    # Add Survey
    print("Adding {:s} survey to DB".format(sname))
    cdwarfs_grp = hdf.create_group(sname)
    # Checks
    if sname != 'COS-Dwarfs':
        raise IOError("Not expecting this survey..")

    # Build spectra (and parse for meta)
    nspec = len(meta)
    max_npix = 20000  # Just needs to be large enough
    data = init_data(max_npix, include_co=False)
    # Init
    spec_set = hdf[sname].create_dataset('spec',
                                         data=data,
                                         chunks=True,
                                         maxshape=(None, ),
                                         compression='gzip')
    spec_set.resize((nspec, ))
    wvminlist = []
    wvmaxlist = []
    npixlist = []
    speclist = []
    # Loop
    path = os.getenv('RAW_IGMSPEC') + '/COS-Dwarfs/'
    maxpix = 0
    for jj, row in enumerate(meta):
        # Generate full file
        coord = ltu.radec_to_coord((row['RA_GROUP'], row['DEC_GROUP']))
        full_file = path + '/J{:s}{:s}_nbin3_coadd.fits.gz'.format(
            coord.ra.to_string(unit=u.hour, sep='', pad=True)[0:4],
            coord.dec.to_string(sep='', pad=True, alwayssign=True)[0:5])
        if 'J1051-0051' in full_file:
            full_file = path + '/PG1049-005_nbin3_coadd.fits.gz'
        if 'J1204+2754' in full_file:
            full_file = path + '/PG1202+281_nbin3_coadd.fits.gz'
        # Parse name
        fname = full_file.split('/')[-1]
        # Extract
        print("COS-Dwarfs: Reading {:s}".format(full_file))
        spec = lsio.readspec(full_file)
        # npix
        npix = spec.npix
        if npix > max_npix:
            raise ValueError(
                "Spectrum has too many pixels for max_npix... ({:d})".format(npix))
        else:
            maxpix = max(npix, maxpix)
        # Some fiddling about
        for key in ['wave', 'flux', 'sig']:
            data[key] = 0.  # Important to init (for compression too)
        data['flux'][0][:npix] = spec.flux.value
        data['sig'][0][:npix] = spec.sig.value
        data['wave'][0][:npix] = spec.wavelength.value
        # Meta
        speclist.append(str(fname))
        wvminlist.append(np.min(data['wave'][0][:npix]))
        wvmaxlist.append(np.max(data['wave'][0][:npix]))
        npixlist.append(npix)
        if chk_meta_only:
            continue
        # Only way to set the dataset correctly
        spec_set[jj] = data

    #
    print("Max pix = {:d}".format(maxpix))
    # Add columns
    meta.add_column(Column(speclist, name='SPEC_FILE'))
    meta.add_column(Column(npixlist, name='NPIX'))
    meta.add_column(Column(wvminlist, name='WV_MIN'))
    meta.add_column(Column(wvmaxlist, name='WV_MAX'))
    meta.add_column(Column(np.arange(nspec, dtype=int), name='GROUP_ID'))

    # Add COS-Dwarfs meta to hdf5
    if chk_meta(meta):
        if chk_meta_only:
            pdb.set_trace()
        hdf[sname]['meta'] = meta
    else:
        raise ValueError("meta file failed")
    # References
    refs = [
        dict(url='http://adsabs.harvard.edu/abs/2014ApJ...796..136B',
             bib='bordoloi+14'),
    ]
    jrefs = ltu.jsonify(refs)
    hdf[sname]['meta'].attrs['Refs'] = json.dumps(jrefs)
    #
    return
Example #10
def read_quest():
    """ Read in the QUEST data -- RR Lyrae from the QUEST survey,
        Vivas et al. 2004. 
        
        - Photometry from:
            http://vizier.cfa.harvard.edu/viz-bin/VizieR?-source=J/AJ/127/1158
        - Spectral data from:
            http://iopscience.iop.org/1538-3881/129/1/189/fulltext/204289.tables.html
            Spectroscopy of bright QUEST RR Lyrae stars (Vivas+, 2008)
            
    """
    phot_filename = os.path.join(project_root, "data", "catalog", \
                                 "quest_vivas2004_phot.tsv")
    phot_data = ascii.read(phot_filename, delimiter="\t", data_start=3)

    # With more spectral data, add here
    vivas2004_spec = ascii.read(os.path.join(project_root, "data", "catalog",
                                             "quest_vivas2004_spec.tsv"),
                                delimiter="\t")

    vivas2008_spec = ascii.read(os.path.join(project_root, "data", "catalog",
                                             "quest_vivas2008_spec.tsv"),
                                delimiter="\t",
                                data_start=3)

    vivas2008_spec.rename_column('HJD0', 'HJD')
    spec_data = vstack((vivas2004_spec, vivas2008_spec))
    all_data = join(left=phot_data,
                    right=spec_data,
                    keys=['[VZA2004]'],
                    join_type='outer')

    new_columns = dict()
    new_columns['ra'] = []
    new_columns['dec'] = []
    new_columns['V'] = []
    new_columns['dist'] = []
    new_columns['Type'] = []
    new_columns['Per'] = []
    new_columns['HJD'] = []
    for row in all_data:
        if not isinstance(row["_RAJ2000_1"], np.ma.core.MaskedConstant):
            icrs = coord.ICRSCoordinates(row["_RAJ2000_1"],
                                         row["_DEJ2000_1"],
                                         unit=(u.degree, u.degree))
        elif not isinstance(row["_RAJ2000_2"], np.ma.core.MaskedConstant):
            icrs = coord.ICRSCoordinates(row["_RAJ2000_2"],
                                         row["_DEJ2000_2"],
                                         unit=(u.degree, u.degree))
        else:
            raise TypeError()

        new_columns['ra'].append(icrs.ra.degrees)
        new_columns['dec'].append(icrs.dec.degrees)

        if not isinstance(row["Type_1"], np.ma.core.MaskedConstant):
            new_columns['Type'].append(row['Type_1'])
        elif not isinstance(row["Type_2"], np.ma.core.MaskedConstant):
            new_columns['Type'].append(row['Type_2'])
        else:
            raise TypeError()

        if not isinstance(row["Per_1"], np.ma.core.MaskedConstant):
            new_columns['Per'].append(row['Per_1'])
        elif not isinstance(row["Per_2"], np.ma.core.MaskedConstant):
            new_columns['Per'].append(row['Per_2'])
        else:
            raise TypeError()

        if not isinstance(row["HJD_1"], np.ma.core.MaskedConstant):
            new_columns['HJD'].append(row['HJD_1'])
        elif not isinstance(row["HJD_2"], np.ma.core.MaskedConstant):
            new_columns['HJD'].append(row['HJD_2'])
        else:
            raise TypeError()

        v1 = row['Vmag_1']
        v2 = row['Vmag_2']
        if v1 != None:
            new_columns['V'].append(v1)
        else:
            new_columns['V'].append(v2)

        if row['Dist'] != None:
            d = row['Dist']
        else:
            d = rrl_photometric_distance(new_columns['V'][-1], -1.5)
        new_columns['dist'].append(d)

    for name, data in new_columns.items():
        all_data.add_column(Column(data, name=name))

    all_data["ra"].units = u.degree
    all_data["dec"].units = u.degree
    all_data["dist"].units = u.kpc

    all_data.remove_column('Lambda')
    all_data.remove_column('Beta')

    has_spectrum = np.logical_not(np.array(all_data['Vgsr'].mask))
    all_data.add_column(Column(has_spectrum, name='has_spectrum'))

    return all_data
Example #11
def completeness(input_sources_cat, detected_sources_cat, output_fname,
                 cat_falsedet, Mag_lim, pix_radius):
    """ Finds out how many stars were detected """

    #Load catalogues in table
    input_cat = ascii.read('%s.txt' % input_sources_cat)
    detected_cat = ascii.read('%s.cat' % detected_sources_cat)
    #print (input_cat)
    #print (detected_cat)
    print('Number of sources in stuff catalog below the mag lim of %.2f: %d' %
          (Mag_lim, len(input_cat[input_cat['MAG'] < Mag_lim])))
    print('Number of sources detected: %d \n' % len(detected_cat))

    #Pixel radius
    pixradius = pix_radius

    nb = 0
    i = 0
    det = np.zeros(len(input_cat))
    x_det_list = np.zeros(len(input_cat))
    y_det_list = np.zeros(len(input_cat))
    mag_sex = np.zeros(len(input_cat))

    col_det = Column(name='detected', data=det)
    x_det_coord = Column(name='x_coord_det', data=x_det_list)
    y_det_coord = Column(name='y_coord_det', data=y_det_list)
    mag_det_col = Column(name='mag_det', data=mag_sex)
    input_cat.add_columns([col_det, x_det_coord, y_det_coord, mag_det_col])

    col_det_sex = Column(name='detected', data=np.zeros(len(detected_cat)))
    detected_cat.add_columns([col_det_sex])

    for x1, y1 in zip(detected_cat['XPEAK_IMAGE'],
                      detected_cat['YPEAK_IMAGE']):
        #print ('object n. {0:d} at position: {1:.2f}-{2:.2f} \n'.format(nb,x1,y1))
        min_dist = 1e40
        j = 0
        x_det = -1
        y_det = -1
        for x2, y2, mag in zip(input_cat['COORD_XPIXEL'],
                               input_cat['COORD_YPIXEL'], input_cat['MAG']):
            if (detected_cat['detected'][i] == 0 and
                    int(x2) - pixradius <= x1 <= int(x2) + pixradius and
                    int(y2) - pixradius <= y1 <= int(y2) + pixradius):
                #Test the minimum distance
                dist = (x2 - x1)**2 + (y2 - y1)**2
                if dist < min_dist:  # and detected_cat['MAG_AUTO'][i] > 0.9*mag and detected_cat['MAG_AUTO'][i] < 1.1*mag:
                    min_dist = dist
                    x_det = x1
                    y_det = y1
                    mag_det = detected_cat['MAG_AUTO'][i]
                    index = j
            j += 1
        if min_dist < 1e40:
            nb += 1
            detected_cat['detected'][i] = 1
            #print ('Matched sources n. {0:d} at position: {1:.2f}-{2:.2f} \n'.format(i,x_det,y_det))
            input_cat['detected'][index] = 1
            input_cat['x_coord_det'][index] = x_det
            input_cat['y_coord_det'][index] = y_det
            input_cat['mag_det'][index] = mag_det
        else:
            detected_cat['detected'][i] = -1
            #print ('Matched sources n. {0:d} at position: {1:.2f}-{2:.2f} \n'.format(i,x_det,y_det))

        i += 1
    """
   for x1,y1 in zip(input_cat['COORD_YPIXEL'],input_cat['COORD_XPIXEL']):
       nb+=1
       #print ('object n. {0:d} at position: {1:.2f}-{2:.2f} \n'.format(nb,x1,y1))
       min_dist=1e40
       x_det=-1;y_det=-1;
       j=0
       for x2, y2 in zip (detected_cat['XPEAK_IMAGE'], detected_cat['YPEAK_IMAGE']):
           if detected_cat['detected'][j]==0 and x2 >= int(x1)-pixradius and x2 <= int(x1)+pixradius and y2 >= int(y1)-pixradius and y2 <= int(y1)+pixradius:
               #Test the minimum distance
               dist=(x2-x1)**2+(y2-y1)**2
               if dist < min_dist:
                  min_dist=dist
                  x_det=x2
                  y_det=y2
                  mag_det=detected_cat['MAG_AUTO'][j]
                  index=j
           j+=1
   
       if min_dist<1e40:
            i+=1
            detected_cat['detected'][index]=1
            #print ('Matched sources n. {0:d} at position: {1:.2f}-{2:.2f} \n'.format(i,x_det,y_det))
            input_cat['detected'][nb-1]=1
            input_cat['x_coord_det'][nb-1]=x_det
            input_cat['y_coord_det'][nb-1]=y_det
            input_cat['mag_det'][nb-1]=mag_det
   """
    #Cross match catalog
    print('Number of sources matched in both catalogs: %d' % nb)

    #Write output file
    ascii.write(input_cat, '%s.txt' % output_fname)

    x_false_list = detected_cat['XPEAK_IMAGE'][detected_cat['detected'] == -1]
    y_false_list = detected_cat['YPEAK_IMAGE'][detected_cat['detected'] == -1]
    mag_sex = detected_cat['MAG_AUTO'][detected_cat['detected'] == -1]

    #x_det_coord=Column(name='x_coord',data=x_det_list)
    #y_det_coord=Column(name='y_coord',data=y_det_list)
    #mag_det=Column(name='mag_det',data=mag_sex)
    false_det_cat = Table([x_false_list, y_false_list, mag_sex],
                          names=('x_coord', 'y_coord', 'mag_det'))

    #Write false detections in a separated file
    ascii.write(false_det_cat, '%s.txt' % cat_falsedet)
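# Hypothetical call sketch (file stems are expected: '<input>.txt' and
# '<detected>.cat' must exist on disk):
# completeness('stuff_sources', 'sextractor_run', 'matched_cat',
#              'false_detections', Mag_lim=24.0, pix_radius=3)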
Example #12
    def _parse_horizons(self, src):
        """
        Routine for parsing data from JPL Horizons


        Parameters
        ----------
        self : HorizonsClass instance
        src : list
            raw response from server


        Returns
        -------
        data : `astropy.Table`
        """

        # return raw response, if desired
        if self.return_raw:
            # reset return_raw flag
            self.return_raw = False
            return src

        # split response by line break
        src = src.split('\n')

        data_start_idx = 0
        data_end_idx = 0
        H, G = nan, nan
        M1, M2, k1, k2, phcof = nan, nan, nan, nan, nan
        headerline = []
        for idx, line in enumerate(src):
            # read in ephemerides header line; replace some field names
            if (self.query_type == 'ephemerides' and
                "Date__(UT)__HR:MN" in line):
                headerline = str(line).split(',')
                headerline[2] = 'solar_presence'
                headerline[3] = 'flags'
                headerline[-1] = '_dump'
            # read in elements header line
            elif (self.query_type == 'elements' and
                  "JDTDB," in line):
                headerline = str(line).split(',')
                headerline[-1] = '_dump'
            # read in vectors header line
            elif (self.query_type == 'vectors' and
                  "JDTDB," in line):
                headerline = str(line).split(',')
                headerline[-1] = '_dump'
            # identify end of data block
            if "$$EOE" in line:
                data_end_idx = idx
            # identify start of data block
            if "$$SOE" in line:
                data_start_idx = idx + 1
            # read in targetname
            if "Target body name" in line:
                targetname = line[18:50].strip()
            # read in H and G (if available)
            if "rotational period in hours)" in line:
                HGline = src[idx + 2].split('=')
                if 'B-V' in HGline[2] and 'G' in HGline[1]:
                    H = float(HGline[1].rstrip('G'))
                    G = float(HGline[2].rstrip('B-V'))
            # read in M1, M2, k1, k2, and phcof (if available)
            if "Comet physical" in line:
                HGline = src[idx + 2].split('=')
                M1 = float(HGline[1].rstrip('M2'))
                k1 = float(HGline[3].rstrip('k2'))
                try:
                    M2 = float(HGline[2].rstrip('k1'))
                    k2 = float(HGline[4].rstrip('PHCOF'))
                    phcof = float(HGline[5])
                except ValueError:
                    M2 = nan
                    k2 = nan
                    phcof = nan
            # catch ambiguous names
            if (("Multiple major-bodies match string" in line or
                 "Matching small-bodies:" in line) and
                ("No matches found" not in src[idx + 1])):
                for i in range(idx + 2, len(src), 1):
                    if (('To SELECT, enter record' in src[i]) or
                        ('make unique selection.' in src[i])):
                        end_idx = i
                        break
                raise ValueError('Ambiguous target name; provide ' +
                                 'unique id:\n%s' %
                                 '\n'.join(src[idx + 2:end_idx]))
            # catch unknown target
            if ("Matching small-bodies" in line and
                "No matches found" in src[idx + 1]):
                raise ValueError('Unknown target. Try different id_type.')
            # catch any unavailability of ephemeris data
            if "No ephemeris for target" in line:
                errormsg = line[line.find('No ephemeris for target'):]
                errormsg = errormsg[:errormsg.find('\n')]
                raise ValueError('Horizons Error: {:s}'.format(errormsg))
            # catch elements errors
            if "Cannot output elements" in line:
                errormsg = line[line.find('Cannot output elements'):]
                errormsg = errormsg[:errormsg.find('\n')]
                raise ValueError('Horizons Error: {:s}'.format(errormsg))

        if headerline == []:
            raise IOError('Cannot parse table column names.')

        # remove all 'Cut-off' messages
        raw_data = [line for line in src[data_start_idx:data_end_idx]
                    if 'Cut-off' not in line]

        # read in data
        data = ascii.read(raw_data,
                          names=headerline,
                          fill_values=[('.n.a.', '0'),
                                       ('n.a.', '0')])

        # convert data to QTable
        # from astropy.table import QTable
        # data = QTable(data)
        # does currently not work, unit assignment in columns creates error
        # results in:
        # TypeError: The value must be a valid Python or Numpy numeric type.

        # remove last column as it is empty
        data.remove_column('_dump')

        # add targetname and physical properties as columns
        data.add_column(Column([targetname] * len(data),
                               name='targetname'), index=0)
        if not isnan(H):
            data.add_column(Column([H] * len(data),
                                   name='H'), index=3)
        if not isnan(G):
            data.add_column(Column([G] * len(data),
                                   name='G'), index=4)
        if not isnan(M1):
            data.add_column(Column([M1] * len(data),
                                   name='M1'), index=3)
        if not isnan(M2):
            data.add_column(Column([M2] * len(data),
                                   name='M2'), index=4)
        if not isnan(k1):
            data.add_column(Column([k1] * len(data),
                                   name='k1'), index=5)
        if not isnan(k2):
            data.add_column(Column([k2] * len(data),
                                   name='k2'), index=6)
        if not isnan(phcof):
            data.add_column(Column([phcof] * len(data),
                                   name='phasecoeff'), index=7)

        # set column definition dictionary
        if self.query_type == 'ephemerides':
            column_defs = conf.eph_columns
        elif self.query_type == 'elements':
            column_defs = conf.elem_columns
        elif self.query_type == 'vectors':
            column_defs = conf.vec_columns
        else:
            raise TypeError('Query type unknown.')

        # set column units
        rename = []
        for col in data.columns:
            data[col].unit = column_defs[col][1]
            if data[col].name != column_defs[col][0]:
                rename.append(data[col].name)

        # rename columns
        for col in rename:
            data.rename_column(data[col].name, column_defs[col][0])

        return data
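    # For context, a sketch of how this parser is normally reached through the
    # public astroquery interface (target and epoch are arbitrary examples):
    # from astroquery.jplhorizons import Horizons
    # obj = Horizons(id='Ceres', location='500', epochs=2458133.33546)
    # eph = obj.ephemerides()  # fetches the raw response and parses it as above
    # print(eph['targetname', 'RA', 'DEC'])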
Example #13
# SETUP / TEARDOWN

scalar_zs = [
    0,
    1,
    1100,  # interesting times
    # FIXME! np.inf breaks some funcs. 0 * inf is an error
    np.float64(3300),  # different type
    2 * cu.redshift,
    3 * u.one  # compatible units
]
_zarr = np.linspace(0, 1e5, num=20)
array_zs = [
    _zarr,  # numpy
    _zarr.tolist(),  # pure python
    Column(_zarr),  # table-like
    _zarr * cu.redshift  # Quantity
]
valid_zs = scalar_zs + array_zs

invalid_zs = [
    (None, TypeError),  # wrong type
    # Wrong units (the TypeError is for the cython, which can differ)
    (4 * u.MeV, (u.UnitConversionError, TypeError)),  # scalar
    ([0, 1] * u.m, (u.UnitConversionError, TypeError)),  # array
]
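# Sketch of how these z fixtures are typically consumed in a parametrized
# test (an assumption about the surrounding test module):
# @pytest.mark.parametrize("z, exc", invalid_zs)
# def test_invalid_z_raises(cosmo, z, exc):
#     with pytest.raises(exc):
#         cosmo.efunc(z)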


class SubCosmology(Cosmology):
    """Defined here to be serializable."""
Example #14
'''
After visual inspection of these candidates: provide new catalogues
'''
artefactlistfile = 'gg_artefact_case1_3-fixed-confirmed.fits'
visually_confirmed = True

artefactlist = Table.read(artefactlistfile)

#select only confirmed ones
if visually_confirmed:
    artefactlist = artefactlist[artefactlist['visual_flag'] == 1]

# for now, no artefacts
artefact = np.zeros(len(lofarcat), dtype=bool)
if 'artefact_flag' not in lofarcat.colnames:
    lofarcat.add_column(Column(artefact, 'artefact_flag'))
else:
    # reset artefact info
    lofarcat['artefact_flag'][:] = False
for n in artefactlist['Source_Name']:
    ni = np.where(lofarcat['Source_Name'] == n)[0][0]
    lofarcat['artefact_flag'][ni] = True

# some more artefacts from lgz and various visual checks (these are stored as a list of names in a simple text file...)
artefactlistfile = '/local/wwilliams/projects/radio_imaging/lofar_surveys/LoTSS-DR1-July21-2017/lgz_v2/artefacts.txt'
with open(artefactlistfile, 'r') as f:
    artefacts = [line.strip() for line in f]
for n in artefacts:
    if n in lofarcat['Source_Name']:
        ni = np.where(lofarcat['Source_Name'] == n)[0][0]
        lofarcat['artefact_flag'][ni] = True
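# An equivalent vectorized pass (an assumption; np.isin flags the same rows
# as the second per-name loop above in a single call):
# lofarcat['artefact_flag'] |= np.isin(lofarcat['Source_Name'], artefacts)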
Example #15
    def group_table(self, edges):
        """Compute bin groups table for the map axis, given coarser bin edges.

        Parameters
        ----------
        edges : `~astropy.units.Quantity`
            Group bin edges.

        Returns
        -------
        groups : `~astropy.table.Table`
            Map axis group table.
        """
        # TODO: try to simplify this code
        if not self.node_type == "edges":
            raise ValueError("Only edge based map axis can be grouped")

        edges_pix = self.coord_to_pix(edges)
        edges_pix = np.clip(edges_pix, -0.5, self.nbin - 0.5)
        edges_idx = np.round(edges_pix + 0.5) - 0.5
        edges_idx = np.unique(edges_idx)
        edges_ref = self.pix_to_coord(edges_idx)

        groups = QTable()
        groups[f"{self.name}_min"] = edges_ref[:-1]
        groups[f"{self.name}_max"] = edges_ref[1:]

        groups["idx_min"] = (edges_idx[:-1] + 0.5).astype(int)
        groups["idx_max"] = (edges_idx[1:] - 0.5).astype(int)

        if len(groups) == 0:
            raise ValueError("No overlap between reference and target edges.")

        groups["bin_type"] = "normal   "

        edge_idx_start, edge_ref_start = edges_idx[0], edges_ref[0]
        if edge_idx_start > 0:
            underflow = {
                "bin_type": "underflow",
                "idx_min": 0,
                "idx_max": edge_idx_start,
                f"{self.name}_min": self.pix_to_coord(-0.5),
                f"{self.name}_max": edge_ref_start,
            }
            groups.insert_row(0, vals=underflow)

        edge_idx_end, edge_ref_end = edges_idx[-1], edges_ref[-1]

        if edge_idx_end < (self.nbin - 0.5):
            overflow = {
                "bin_type": "overflow",
                "idx_min": edge_idx_end + 1,
                "idx_max": self.nbin - 1,
                f"{self.name}_min": edge_ref_end,
                f"{self.name}_max": self.pix_to_coord(self.nbin - 0.5),
            }
            groups.add_row(vals=overflow)

        group_idx = Column(np.arange(len(groups)))
        groups.add_column(group_idx, name="group_idx", index=0)
        return groups
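    # A usage sketch (an assumption: this method belongs to a gammapy MapAxis
    # with node_type="edges"):
    # import astropy.units as u
    # from gammapy.maps import MapAxis
    # axis = MapAxis.from_energy_bounds("1 TeV", "100 TeV", nbin=12)
    # groups = axis.group_table(edges=[1, 10, 100] * u.TeV)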
Example #16
def run(params, focus):
    # Remove existing postage stamp images, or creates new folder
    out_dir = params.out_path + params.seg_id + '/'
    if os.path.isdir(out_dir + 'postage_stamps') is True:
        subprocess.call(["rm", '-r', out_dir + 'postage_stamps'])
        subprocess.call(["mkdir", out_dir + 'postage_stamps'])
    else:
        subprocess.call(["mkdir", out_dir + 'postage_stamps'])
    catalogs = []
    tt_files = []
    #Open main catalog in all filters
    for filt in params.filters:
        cat_name = out_dir + '/' + filt + "_clean.cat"
        catalog = Table.read(cat_name, format="ascii.basic")
        #make new column to indicate if a postage stamp is created for that object
        col = Column(np.zeros(len(catalog)),
                     name='IS_PSTAMP',
                     dtype='int',
                     description='created postage stamp')
        catalog.add_column(col)
        catalogs.append(catalog)
        tt_file = params.tt_file_path + "/" + filt + "/{}_stars.txt".format(
            filt)
        tt_files.append(np.loadtxt(tt_file))
    # Get indices of galaxies above the cut-off SNR and not masked.
    # Also get their sizes in different filters.
    idx = [[], [], [], []]
    for i in range(len(catalogs[0])):
        x0 = catalogs[0]['X_IMAGE'][int(i)]
        y0 = catalogs[0]['Y_IMAGE'][int(i)]
        x_sizes = []
        y_sizes = []
        pos = []
        # Select objects that satisfy criterion
        for f, filt in enumerate(params.filters):
            cond1 = (catalogs[f]['IS_STAR'][i] == 0)
            cond2 = (catalogs[f]['IN_MASK'][i] == 0)
            cond3 = (catalogs[f]['SNR'][i] >= 0)
            cond4 = (catalogs[f]['MULTI_DET'][i] == 0)
            #Placing magnitude cut on only last filter
            cond5 = (catalogs[-1]['MAG_CORR'][i] <= 25.2)
            if cond1 and cond2 and cond3 and cond4 and cond5:
                t = (catalogs[f]['THETA_IMAGE'][int(i)]) * np.pi / 180.
                e = catalogs[f]['ELLIPTICITY'][int(i)]
                A = 2.5 * (catalogs[f]['A_IMAGE'][int(i)]) * (
                    catalogs[f]['KRON_RADIUS'][int(i)])
                x_size = A * (np.absolute(np.sin(t)) +
                              (1 - e) * np.absolute(np.cos(t)))
                y_size = A * (np.absolute(np.cos(t)) +
                              (1 - e) * np.absolute(np.sin(t)))
                x_sizes.append(x_size)
                y_sizes.append(y_size)
            else:
                break
            # get coordinates of nearest star in tt_starfield
            tt_pos = fn.get_closest_tt(x0, y0, tt_files[f])
            if tt_pos:
                pos.append(tt_pos)
            else:
                break
            if f == len(params.filters) - 1:
                idx[0].append(i)
                idx[1].append(x_sizes)
                idx[2].append(y_sizes)
                idx[3].append(pos)
    obj_ids = np.array(idx[0], dtype=int)
    # save list with NUMBER of all objects with pstamps
    np.savetxt(out_dir + 'objects_with_p_stamps.txt', obj_ids, fmt="%i")
    #save catalogs
    for f, filt in enumerate(params.filters):
        # column to save focus
        col = Column(np.ones(len(catalogs[f])) * focus[filt],
                     name='FOCUS',
                     dtype='int',
                     description='Focus of image')
        catalogs[f].add_column(col)
        catalogs[f]['IS_PSTAMP'][obj_ids] = 1
        cat_name = out_dir + '/' + filt + "_full.cat"
        catalogs[f].write(cat_name, format="ascii.basic")
    #Get postage stamp image of the galaxy in all filters.
    #Postage stamp size is set by the largest filter image
    for num, i in enumerate(idx[0]):
        print "Saving postage stamp with object id:", i
        gal_images = []
        psf_images = []
        info = {}
        x0 = catalogs[0]['X_IMAGE'][int(i)]
        y0 = catalogs[0]['Y_IMAGE'][int(i)]
        x_stamp_size = max(idx[1][num])
        y_stamp_size = max(idx[2][num])
        stamp_size = [int(y_stamp_size), int(x_stamp_size)]
        psf_stamp_size = [20, 20]
        print "Stamp size of image:", stamp_size
        #import ipdb; ipdb.set_trace()
        gal_header = pyfits.Header()
        psf_header = pyfits.Header()
        temp = go.GalaxyCatalog(None)
        header_params = temp.output_params
        #import ipdb; ipdb.set_trace()
        for f, filt in enumerate(params.filters):
            tt_pos = idx[3][num][f]
            gal_file_name = out_dir + 'postage_stamps/' + filt + '_' + params.seg_id + '_' + str(
                i) + '_image.fits'
            psf_file_name = out_dir + 'postage_stamps/' + filt + '_' + params.seg_id + '_' + str(
                i) + '_psf.fits'
            seg_file_name = out_dir + 'postage_stamps/' + filt + '_' + params.seg_id + '_' + str(
                i) + '_seg.fits'
            gal_name = params.data_files[filt]
            gal_image = fn.get_subImage_pyfits(x0,
                                               y0,
                                               stamp_size,
                                               gal_name,
                                               None,
                                               None,
                                               save_img=False)
            psf_name = params.tt_file_path + filt + '/' + params.tt_file_name[
                focus[filt]]
            psf_image = fn.get_subImage_pyfits(tt_pos[0],
                                               tt_pos[1],
                                               psf_stamp_size,
                                               psf_name,
                                               None,
                                               None,
                                               save_img=False)
            seg_name = out_dir + filt + '_comb_seg_map.fits'
            seg_image = fn.get_subImage_pyfits(x0,
                                               y0,
                                               stamp_size,
                                               seg_name,
                                               None,
                                               None,
                                               save_img=False)
            for header_param in header_params:
                try:
                    gal_header[header_param] = catalogs[f][header_param][i]
                except Exception:
                    gal_header[header_param] = 9999.99

            psf_header['X'] = tt_pos[0]
            psf_header['Y'] = tt_pos[1]
            psf_header['width'] = psf_stamp_size[0]
            psf_header['height'] = psf_stamp_size[1]
            pyfits.writeto(gal_file_name, gal_image, gal_header, clobber=True)
            pyfits.writeto(psf_file_name, psf_image, psf_header, clobber=True)
            pyfits.writeto(seg_file_name, seg_image, clobber=True)
Example #17
##delta_t_fraction = n.hstack((0., n.cumsum(delta_t_hist[0]) / N_clu))
##delta_t_values_itp = interp1d(delta_t_hist[1], delta_t_fraction)
### if coolness is small, then it is disturbed
### if coolness is long, then it is relaxed
##coolness = delta_t_values_itp(delta_t_MM)

# implement correlated scatter for quantities
# as of now it is 100% correlated (false)

t = Table()

for col_name, unit_val in zip(f2[1].data.columns.names,
                              f2[1].data.columns.units):
    t.add_column(
        Column(name=col_name,
               data=f2[1].data[col_name][cluster],
               unit=unit_val,
               dtype=n.float32))

for col_name in f1[1].data.columns.names:
    if col_name == 'Mvir' or col_name == 'M200c' or col_name == 'M500c':
        t.add_column(
            Column(name='HALO_' + col_name,
                   data=f1[1].data[col_name][cluster] / h,
                   unit='',
                   dtype=n.float32))
    elif col_name == 'id' or col_name == 'pid':
        t.add_column(
            Column(name='HALO_' + col_name,
                   data=f1[1].data[col_name][cluster],
                   unit='',
                   dtype=n.int64))
#Xlan = 0.1

samples = {}
samples['tini'] = 0.1
samples['tmax'] = 14.0
samples['dt'] = 0.1
samples['Xlan'] = Xlan
samples['mej'] = mej
samples['vej'] = vej
ModelPath = '%s/svdmodels'%('../output')
kwargs = {'SaveModel':False,'LoadModel':True,'ModelPath':ModelPath}
kwargs["doAB"] = True
kwargs["doSpec"] = False
t = Table()
for key, val in samples.items():
    t.add_column(Column(data=[val],name=key))
samples = t
model = 'Ka2017'
model_table = KNTable.model(model, samples, **kwargs)
tmag4, lbol4, mag4 = model_table["t"][0], model_table["lbol"][0], model_table["mag"][0]
zp_best4 = 0.0

title_fontsize = 30
label_fontsize = 30

#filts = ["u","g","r","i","z","y","J","H","K"]
filts = ["g","V","F606W","r","i","z","J","F160W","K"]
colors=cm.jet(np.linspace(0,1,len(filts)))
tini, tmax, dt = 0.0, 21.0, 0.1    
tt = np.arange(tini,tmax,dt)
Example #19
def irafstarfind(data,
                 threshold,
                 fwhm,
                 sigma_radius=1.5,
                 minsep_fwhm=2.5,
                 sharplo=0.5,
                 sharphi=2.0,
                 roundlo=0.0,
                 roundhi=0.2,
                 sky=None,
                 exclude_border=False):
    """
    Detect stars in an image using IRAF's "starfind" algorithm.

    `starfind`_ searches images for local density maxima that have a
    peak amplitude greater than ``threshold`` above the local background
    and have a PSF full-width half-maximum similar to the input
    ``fwhm``.  The objects' centroid, roundness (ellipticity), and
    sharpness are calculated using image moments.

    .. _starfind: http://iraf.net/irafhelp.php?val=starfind&help=Help+Page

    Parameters
    ----------
    data : array_like
        The 2D array of the image.

    threshold : float
        The absolute image value above which to select sources.

    fwhm : float
        The full-width half-maximum (FWHM) of the 2D circular Gaussian
        kernel in units of pixels.

    minsep_fwhm : float, optional
        The minimum separation for detected objects in units of
        ``fwhm``.

    sigma_radius : float, optional
        The truncation radius of the Gaussian kernel in units of sigma
        (standard deviation) [``1 sigma = FWHM /
        (2.0*sqrt(2.0*log(2.0)))``].

    sharplo : float, optional
        The lower bound on sharpness for object detection.

    sharphi : float, optional
        The upper bound on sharpness for object detection.

    roundlo : float, optional
        The lower bound on roundness for object detection.

    roundhi : float, optional
        The upper bound on roundness for object detection.

    sky : float, optional
        The background sky level of the image.  Inputting a ``sky`` value
        will override the background sky estimate.  Setting ``sky``
        affects only the output values of the object ``peak``, ``flux``,
        and ``mag``.  The default is ``None``, which means the
        sky value will be estimated using the `starfind`_ method.

    exclude_border : bool, optional
        Set to `True` to exclude sources found within half the size of
        the convolution kernel from the image borders.  The default is
        `False`, which is the mode used by `starfind`_.

    Returns
    -------
    table : `~astropy.table.Table`

        A table of found objects with the following parameters:

        * ``id``: unique object identification number.
        * ``xcentroid, ycentroid``: object centroid (zero-based origin).
        * ``fwhm``: estimate of object FWHM from image moments.
        * ``sharpness``: object sharpness calculated from image moments.
        * ``roundness``: object ellipticity calculated from image moments.
        * ``pa``:  object position angle in degrees from the positive x
          axis calculated from image moments.
        * ``npix``: number of pixels in the object used to calculate
          ``flux``.
        * ``sky``: the derived background sky value, unless ``sky`` was
          input.  If ``sky`` was input, then that value overrides the
          background sky estimation.
        * ``peak``: the peak, sky-subtracted, pixel value of the object.
        * ``flux``: the object sky-subtracted flux, calculated by
          summing object pixels over the Gaussian kernel.  The
          derivation matches that of `starfind`_ if ``sky`` is ``None``.
        * ``mag``: the object instrumental magnitude calculated as
          ``-2.5 * log10(flux)``.  The derivation matches that of
          `starfind`_ if ``sky`` is ``None``.

    See Also
    --------
    daofind

    Notes
    -----
    For the convolution step, this routine sets pixels beyond the image
    borders to 0.0.  The equivalent parameters in `starfind`_ are
    ``boundary='constant'`` and ``constant=0.0``.

    IRAF's `starfind`_ uses ``hwhmpsf``, ``fradius``, and ``sepmin`` as
    input parameters.  The equivalent input values for ``irafstarfind``
    are:

    * ``fwhm = hwhmpsf * 2``
    * ``sigma_radius = fradius * sqrt(2.0*log(2.0))``
    * ``minsep_fwhm = 0.5 * sepmin``

    The main differences between ``daofind`` and ``irafstarfind`` are:

    * ``irafstarfind`` always uses a 2D circular Gaussian kernel,
      while ``daofind`` can use an elliptical Gaussian kernel.

    * ``irafstarfind`` calculates the objects' centroid, roundness,
      and sharpness using image moments.

    References
    ----------
    .. [1] http://iraf.net/irafhelp.php?val=starfind&help=Help+Page
    .. [2] http://stsdas.stsci.edu/cgi-bin/gethelp.cgi?starfind
    """

    starfind_kernel = _FindObjKernel(fwhm,
                                     ratio=1.0,
                                     theta=0.0,
                                     sigma_radius=sigma_radius)
    min_separation = max(2, int((fwhm * minsep_fwhm) + 0.5))
    objs = _findobjs(data,
                     threshold,
                     starfind_kernel,
                     min_separation=min_separation,
                     exclude_border=exclude_border)
    tbl = _irafstarfind_properties(objs, starfind_kernel, sky)
    if len(objs) == 0:
        warnings.warn('No sources were found.', AstropyUserWarning)
        return tbl  # empty table
    table_mask = ((tbl['sharpness'] > sharplo) & (tbl['sharpness'] < sharphi) &
                  (tbl['roundness'] > roundlo) & (tbl['roundness'] < roundhi))
    tbl = tbl[table_mask]
    idcol = Column(name='id', data=np.arange(len(tbl)) + 1)
    tbl.add_column(idcol, 0)
    if len(tbl) == 0:
        warnings.warn(
            'Sources were found, but none pass the sharpness and '
            'roundness criteria.', AstropyUserWarning)
    return tbl
Example #20
import numpy as np
from pylab import *  # ion, figure, subplot, plot, xlabel, ylabel, xlim, ylim
from astropy.table import Column

import desimodel.io
from desimodel.focalplane import xy2radec  # assumed home of xy2radec
# NOTE: 'Circle' is a small helper class defined elsewhere in this script;
# its get_points(n) returns n (x, y) points on the circle perimeter.

fp = desimodel.io.load_fiberpos()  #- load the fiberpos.fits file
telra, teldec = 10.0, 20.0  #- telescope central pointing at this RA,dec

# Create circles at each fiberpos
circles = []
for i in range(len(fp['X'])):
    circles.append(Circle(fp['X'][i], fp['Y'][i], 6))

# circles = [Circle(-200, -300, 6)]

# Aggregate transformed points from each circle
ra_col = np.array([])
dec_col = np.array([])
for c in circles:
    x, y = c.get_points(50)
    ra, dec = xy2radec(telra, teldec, Column(x), Column(y))
    ra_col = np.append(ra_col, ra)
    dec_col = np.append(dec_col, dec)

ion()
figure(figsize=(8, 4))
# subplot(121)
# plot(fp['X'], fp['Y'], '.'); xlabel('X [mm]'); ylabel('Y [mm]')
ax = subplot(121)
plot(ra_col, dec_col, '.')
xlabel('RA [degrees]')
ylabel('dec [degrees]')

xlim([10.80, 10.90])
ylim([18.75, 18.85])
Example #21
def observation_table(filename, verbose=False):
    """
    Store Gemini observation information in a '~astropy.table.Table' object.
    Converts some variable types and merges related columns.

    Parameters
    ----------
    filename : str
        Path to the OT catalog browser output file.

    verbose : bool, optional
        Passed through to catalog_table for extra output.

    Returns
    -------
    '~astropy.table.Table'

        Columns
        --------
        prog_ref            (string)                unique program identifier
        obs_id              (string)                unique observation identifier
        pi                  (string)                principal investigator
        inst                (string)                instrument name
        target              (string)                target name
        ra                  (degrees)               right ascension in degrees
        dec                 (degrees)               declination in degrees
        band                (int)                   queue ranking band
        partner             (string)                gemini partner name
        obs_status          (string)                'ready' status of observation
        tot_time            ('astropy.units' hours) total planned observation time
        obs_time            ('astropy.units' hours) completed observation time
        obs_comp            (float)                 fraction of completed/total observation time
        charged_time        (string)                HH:MM:SS (required to compute obs_comp)
        obs_class           (string)                observation class
        iq                  (float)                 image quality constraint (percentile converted to decimal value)
        cc                  (float)                 cloud condition constraint (percentile converted to decimal value)
        bg                  (float)                 sky background constraint (percentile converted to decimal value)
        wv                  (float)                 water vapor constraint (percentile converted to decimal value)
        user_prior          (string)                user priority (Low, Medium, High, Target of Opportunity)
        too_status          (string)                ToO type (Rapid, Standard, None)
        group               (string)                observation group name
        elev_const          (dictionary)            elevation constraint {'type':string,'min':float,'max':float}
        ready               (boolean)               ready status
        disperser           (string)                disperser name
        fpu                 (string)                focal plane unit
        grcwlen             (string)                grating control wavelength
        crwlen              (string)                central wavelength
        filter              (string)                filter name
        mask                (string)                mask name
        xbin                (string)                xbin number
        ybin                (string)                ybin number
    """

    # Read the OT ascii catalog file columns into an 'astropy.table.Table' structure.
    # This module will need to be replaced or changed to support a different
    # observation data format or file format.
    cattable = catalog_table(filename, verbose=verbose)

    # Select observations from catalog to add to queue
    i_obs = selectqueue(cattable=cattable)

    # for now, take all observations from catalog table.
    # A method for selecting only 'ready' observations could be included here.
    # i_obs = np.arange(len(cattable))

    n_obs = len(i_obs)  # number of observations
    obstable = Table()  # initialize table

    # # Add column for time of ToO arrival for simulation purposes
    # if 'arrival' in cattable.colnames:
    #     obstable['arrival'] = cattable['arrival'][i_obs]

    obstable['prog_ref'] = cattable['prog_ref'][i_obs]
    obstable['obs_id'] = cattable['obs_id'][i_obs]
    obstable['pi'] = cattable['pi'][i_obs]
    obstable['inst'] = cattable['inst'][i_obs]
    obstable['target'] = cattable['target'][i_obs]

    # ------ Get current epoch coordinates ------
    epoch = Time.now()
    coord_j2000 = SkyCoord(cattable['ra'][i_obs],
                           cattable['dec'][i_obs],
                           frame='icrs',
                           unit=(u.deg, u.deg))
    current_epoch = coord_j2000.transform_to(
        FK5(equinox='J' + str(epoch.jyear)))
    obstable['ra'] = Column(current_epoch.ra.value, unit='deg')
    obstable['dec'] = Column(current_epoch.dec.value, unit='deg')

    # ------ Format condition constraints -------
    iq = np.array(list(map(fixcondstring, cattable['iq'][i_obs])))
    cc = np.array(list(map(fixcondstring, cattable['cloud'][i_obs])))
    wv = np.array(list(map(fixcondstring, cattable['wv'][i_obs])))
    obstable['iq'], obstable['cc'], obstable['bg'], obstable['wv'] = \
        convertcond.convertcond(iq, cc, cattable['sky_bg'][i_obs], wv)

    obstable['band'] = list(map(int, cattable['band'][i_obs]))
    obstable['partner'] = cattable['partner'][i_obs]
    obstable['obs_status'] = cattable['obs_status'][i_obs]
    obstable['obs_class'] = cattable['obs_class'][i_obs]
    obstable['user_prior'] = cattable['user_prio'][i_obs]
    obstable['qc_prior'] = np.ones(len(i_obs))

    # -- ToO status --
    too_status = []
    for user_prior in obstable['user_prior']:
        if 'Rapid' in user_prior:
            tootype = 'Rapid'
        elif 'Standard' in user_prior:
            tootype = 'Standard'
        else:
            tootype = 'None'
        too_status.append(tootype)
    obstable['too_status'] = too_status

    obstable['group'] = cattable['group'][i_obs]
    obstable['elev_const'] = [
        convert_elevation(cattable['elev_const'][i]) for i in i_obs
    ]
    obstable['time_const'] = cattable['time_const'][i_obs]
    obstable['ready'] = np.array(list(map(bool, cattable['ready'][i_obs])))
    obstable['f2_disperser'] = cattable['disperser'][i_obs]
    obstable['f2_filter'] = cattable['filter'][i_obs]
    obstable['grcwlen'] = cattable['grating_ctrl_wvl'][i_obs]
    obstable['xbin'] = cattable['x_bin'][i_obs]
    obstable['ybin'] = cattable['y_bin'][i_obs]
    obstable['disperser'] = cattable['disperser_2'][i_obs]
    obstable['filter'] = cattable['filter_2'][i_obs]
    obstable['fpu'] = cattable['fpu'][i_obs]
    obstable['custom_mask_mdf'] = cattable['custom_mask_mdf'][i_obs]

    f2_fpu = cattable['focal_plane_unit'][i_obs]
    crwlen = cattable['central_wavelength'][i_obs]
    bh_xbin = cattable['ccd_x_binning'][i_obs]
    bh_ybin = cattable['ccd_y_binning'][i_obs]
    mask = cattable['mask'][i_obs]
    # fpu = cattable['fpu'][i_obs]
    # custom_mask = cattable['custom_mask_mdf'][i_obs]

    # ------ Combine columns ------

    # Put custom mask (MOS MDF) names into fpu field, in Nov 2018 this only works for GMOS
    # print(instcal['gmos_fpu'])
    # if 'Custom Mask' not in instcal['gmos_fpu'].data[0]:
    #     ii = np.where(fpu == 'Custom Mask')[0][:]
    #     if len(ii) != 0:
    #         obstable['fpu'][ii] = custom_mask[ii]

    ii = np.where(f2_fpu != 'null')[0][:]
    if len(ii) != 0:
        obstable['fpu'][ii] = f2_fpu[ii]

    ii = np.where(crwlen != 'null')[0][:]
    if len(ii) != 0:
        obstable['grcwlen'][ii] = crwlen[ii]

    ii = np.where(bh_xbin != 'null')[0][:]
    if len(ii) != 0:
        obstable['xbin'][ii] = bh_xbin[ii]

    ii = np.where(bh_ybin != 'null')[0][:]
    if len(ii) != 0:
        obstable['ybin'][ii] = bh_ybin[ii]

    ii = np.where(mask != 'null')[0][:]
    if len(ii) != 0:
        obstable['fpu'][ii] = mask[ii]

    ii = np.where(obstable['group'] == '')[0][:]
    if len(ii) != 0:
        obstable['group'][ii] = obstable['obs_id'][ii]

    # --- Convert observation times ---
    obs_comp = []
    tot_time = []
    obs_time = []
    # Some charged time fields can be empty.  Fancy indexing returns a copy,
    # so assign through the original row indices for the fix to stick.
    ii = np.where(cattable['charged_time'][i_obs] == '')[0][:]
    if len(ii) != 0:
        cattable['charged_time'][np.asarray(i_obs)[ii]] = '00:00:00'
    for i in range(0, n_obs):
        charged = hms_to_hr(cattable['charged_time'][i_obs][i])
        # print(i, cattable['obs_id'][i_obs][i], cattable['charged_time'][i_obs][i], cattable['planned_exec_time'][i_obs][i])
        total = hms_to_hr(cattable['planned_exec_time'][i_obs][i])
        if charged > 0.:  # add additional time
            if 'Mirror' in obstable['disperser'][i]:
                total = total + 0.2
            else:
                total = total + 0.3
        obs_comp.append(charged / total)  # completion fraction
        tot_time.append(total)  # total time required
        obs_time.append(charged)  # observed time
    obstable['tot_time'] = Column(np.array(tot_time, dtype=float), unit='hr')
    obstable['obs_time'] = Column(np.array(obs_time, dtype=float), unit='hr')
    obstable['obs_comp'] = obs_comp

    # unused columns from catalog file...
    # obstable['qa_status'] = cattable['obs_qa'][i_obs]
    # obstable['dataflow_step'] = cattable['dataflow_step'][i_obs]
    # obstable['planned_pi_time'] = cattable.planned_pi_time[i_obs]
    # obstable['ao'] = cattable.ao[i_obs]
    # obstable['group_type'] = cattable.gt[i_obs]
    # obstable['color_filter'] = cattable.color_filter[i_obs]
    # obstable['nd_filter'] = cattable.neutral_density_filter[i_obs]
    # obstable['binning'] = cattable.binning[i_obs]
    # obstable['windowing'] = cattable.windowing[i_obs]
    # obstable['lens'] = cattable.lens[i_obs]
    # obstable['cass_rotator'] = cattable.cass_rotator[i_obs]
    # obstable['bh_ccdamps'] = cattable.ccd_amplifiers[i_obs]
    # obstable['bh_ccdgain'] = cattable.ccd_gain[i_obs]
    # obstable['bh_ccdspeed'] = cattable.ccd_speed[i_obs]
    # obstable['bh_fibre'] = cattable.entrance_fibre[i_obs]
    # obstable['bh_expmeter_filter'] = cattable.exposure_meter_filter[i_obs]
    # obstable['bh_hartmann'] = cattable.hartmann_flap[i_obs]
    # obstable['bh_issport'] = cattable.iss_port[i_obs]
    # obstable['bh_pslitfilter'] = cattable.post_slit_filter[i_obs]
    # obstable['bh_roi'] = cattable.region_of_interest[i_obs]
    # obstable['f2_readmode'] = cattable.read_mode[i_obs]
    # obstable['f2_lyot'] = cattable.lyot_wheel[i_obs]
    # obstable['roi'] = cattable.builtin_roi[i_obs]
    # obstable['nodshuffle'] = cattable.nod_shuffle[i_obs]
    # obstable['dtax'] = cattable.dta_x_offset[i_obs]
    # obstable['custom_mask'] = cattable.custom_mask_mdf[i_obs]
    # obstable['preimage'] = cattable.mos_pre_imaging[i_obs]
    # obstable['amp_count'] = cattable.amp_count[i_obs]
    # obstable['detector'] = cattable.detector_manufacturer[i_obs]
    # obstable['FIELD063'] = cattable.grating_ctrl_wvl_2[i_obs]
    # obstable['FIELD064'] = cattable.x_bin_2[i_obs]
    # obstable['FIELD065'] = cattable.y_bin_2[i_obs]
    # obstable['FIELD066'] = cattable.builtin_roi_2[i_obs]
    # obstable['FIELD067'] = cattable.nod_shuffle_2[i_obs]
    # obstable['FIELD068'] = cattable.dta_x_offset_2[i_obs]
    # obstable['FIELD069'] = cattable.custom_mask_mdf_2[i_obs]
    # obstable['FIELD070'] = cattable.mos_pre_imaging_2[i_obs]
    # obstable['FIELD071'] = cattable.amp_count_2[i_obs]
    # obstable['FIELD072'] = cattable.disperser_3[i_obs]
    # obstable['FIELD073'] = cattable.filter_3[i_obs]
    # obstable['FIELD074'] = cattable.fpu_2[i_obs]
    # obstable['FIELD075'] = cattable.detector_manufacturer_2[i_obs]
    # obstable['pixel_scale'] = cattable.pixel_scale[i_obs]
    # obstable['FIELD077'] = cattable.disperser_4[i_obs]
    # obstable['FIELD078'] = cattable.focal_plane_unit_2[i_obs]
    # obstable['cross_dispersed'] = cattable.cross_dispersed[i_obs]
    # obstable['FIELD080'] = cattable.read_mode_2[i_obs]
    # obstable['iss_port'] = cattable.iss_port_2[i_obs]
    # obstable['FIELD083'] = cattable.well_depth[i_obs]
    # obstable['FIELD084'] = cattable.filter_4[i_obs]
    # obstable['readmode'] = cattable.read_mode_3[i_obs]
    # obstable['astrometric'] = cattable.astrometric_field[i_obs]
    # obstable['FIELD087'] = cattable.disperser_5[i_obs]
    # obstable['adc'] = cattable.adc[i_obs]
    # obstable['observing_mode'] = cattable.observing_mode[i_obs]
    # obstable['coadds'] = cattable.coadds[i_obs]
    # obstable['exptime'] = cattable.exposure_time[i_obs]
    # obstable['FIELD092'] = cattable.disperser_6[i_obs]
    # obstable['eng_mask'] = cattable.engineering_mask[i_obs]
    # obstable['FIELD095'] = cattable.filter_[i_obs]
    # obstable['order'] = cattable.disperser_order[i_obs]
    # obstable['nici_fpu'] = cattable.focal_plane_mask[i_obs]
    # obstable['nici_pupil'] = cattable.pupil_mask[i_obs]
    # obstable['nici_cassrot'] = cattable.cass_rotator_2[i_obs]
    # obstable['nici_imgmode'] = cattable.imaging_mode[i_obs]
    # obstable['nici_dichroic'] = cattable.dichroic_wheel[i_obs]
    # obstable['nici_fw1'] = cattable.filter_red_channel[i_obs]
    # obstable['nici_fw2'] = cattable.filter_blue_channel[i_obs]
    # obstable['nici_welldepth'] = cattable.well_depth_2[i_obs]
    # obstable['nici_dhs'] = cattable.dhs_mode[i_obs]
    # obstable['imaging_mirror'] = cattable.imaging_mirror[i_obs]
    # obstable['FIELD107'] = cattable.disperser_7[i_obs]
    # obstable['FIELD108'] = cattable.mask_2[i_obs]
    # obstable['FIELD109'] = cattable.filter_6[i_obs]
    # obstable['FIELD110'] = cattable.read_mode_4[i_obs]
    # obstable['camera'] = cattable.camera[i_obs]
    # obstable['FIELD112'] = cattable.disperser_8[i_obs]
    # obstable['FIELD113'] = cattable.mask_3[i_obs]
    # obstable['FIELD114'] = cattable.filter_7[i_obs]
    # obstable['beam_splitter'] = cattable.beam_splitter[i_obs]
    # obstable['FIELD116'] = cattable.read_mode_5[i_obs]
    # obstable['FIELD117'] = cattable.mask_4[i_obs]
    # obstable['FIELD118'] = cattable.filter_8[i_obs]
    # obstable['FIELD119'] = cattable.disperser_9[i_obs]
    # obstable['FIELD120'] = cattable.disperser_10[i_obs]
    # obstable['FIELD121'] = cattable.mask_5[i_obs]
    # obstable['FIELD122'] = cattable.filter_9[i_obs]

    return obstable
Example #22
def measure_labeled_regions(data,
                            labels,
                            tag='IMAGE',
                            measure_positions=True,
                            measure_values=True,
                            fits_offset=True,
                            bbox_offset=True):
    """Measure source properties in image.

    Sources are defined by a label image.

    Parameters
    ----------
    data : array_like
        Image to measure.
    labels : array_like of int
        Label image defining the sources (0 = background).
    tag : str, optional
        Prefix for the value columns (e.g. ``IMAGE_SUM``).
    measure_positions, measure_values : bool, optional
        Toggle the position and value measurements.
    fits_offset, bbox_offset : bool, optional
        Apply the FITS convention (start counting at 1) and the SExtractor
        bounding-box convention (slice max is inside) to the output.

    Returns
    -------
    table : `~astropy.table.Table`
        Table of measured source properties.
    """
    import scipy.ndimage as nd
    from astropy.table import Table, Column
    # Measure all segments
    nsegments = labels.max()
    index = np.arange(1, nsegments + 1)  # Measure all sources
    # Measure per-segment statistics (renamed to avoid shadowing builtins)
    seg_sum = nd.sum(data, labels, index)
    seg_max = nd.maximum(data, labels, index)
    seg_mean = nd.mean(data, labels, index)
    x, y = _split_xys(nd.center_of_mass(data, labels, index))
    xpeak, ypeak = _split_xys(nd.maximum_position(data, labels, index))
    xmin, xmax, ymin, ymax = _split_slices(nd.find_objects(labels))
    area = _measure_area(labels)
    # Use FITS convention, i.e. start counting at 1
    FITS_OFFSET = 1 if fits_offset else 0
    # Use SExtractor convention, i.e. slice max is inside
    BBOX_OFFSET = -1 if bbox_offset else 0
    # Create a table
    table = Table()
    table.add_column(Column(data=index, name='NUMBER'))

    if measure_positions:
        table.add_column(Column(data=x + FITS_OFFSET, name='X_IMAGE'))
        table.add_column(Column(data=y + FITS_OFFSET, name='Y_IMAGE'))
        table.add_column(Column(data=xpeak + FITS_OFFSET, name='XPEAK_IMAGE'))
        table.add_column(Column(data=ypeak + FITS_OFFSET, name='YPEAK_IMAGE'))
        table.add_column(Column(data=xmin + FITS_OFFSET, name='XMIN_IMAGE'))
        table.add_column(
            Column(data=xmax + FITS_OFFSET + BBOX_OFFSET, name='XMAX_IMAGE'))
        table.add_column(Column(data=ymin + FITS_OFFSET, name='YMIN_IMAGE'))
        table.add_column(
            Column(data=ymax + FITS_OFFSET + BBOX_OFFSET, name='YMAX_IMAGE'))
        table.add_column(Column(data=area, name='AREA'))

    if measure_values:
        table.add_column(Column(data=seg_max, name=tag + '_MAX'))
        table.add_column(Column(data=seg_sum, name=tag + '_SUM'))
        table.add_column(Column(data=seg_mean, name=tag + '_MEAN'))

    return table
Example #23
 def _setup(self, table_type):
     self.data = [Column([1, 3], name='x', dtype=np.int32),
                  np.array([2, 4], dtype=np.int32),
                  np.array([3, 5], dtype='i8')]
Example #24
def main_lcurve(args=None):
    """Main function."""
    import argparse

    description = ('Load a series of cross scans from a config file '
                   'and obtain a calibrated curve.')
    parser = argparse.ArgumentParser(description=description)

    parser.add_argument("file",
                        nargs='?',
                        help="Input calibration file",
                        default=None,
                        type=str)
    parser.add_argument("-s",
                        "--source",
                        nargs='+',
                        type=str,
                        default=None,
                        help='Source or list of sources')
    parser.add_argument("--sample-config",
                        action='store_true',
                        default=False,
                        help='Produce sample config file')

    parser.add_argument("--nofilt",
                        action='store_true',
                        default=False,
                        help='Do not filter noisy channels')

    parser.add_argument("-c",
                        "--config",
                        type=str,
                        default=None,
                        help='Config file')

    parser.add_argument("--splat",
                        type=str,
                        default=None,
                        help=("Spectral scans will be scrunched into a single "
                              "channel containing data in the given frequency "
                              "range, starting from the frequency of the first"
                              " bin. E.g. '0:1000' indicates 'from the first "
                              "bin of the spectrum up to 1000 MHz above'. ':' "
                              "or 'all' for all the channels."))

    parser.add_argument("-o",
                        "--output",
                        type=str,
                        default=None,
                        help='Output file containing the calibration')

    args = parser.parse_args(args)

    if args.sample_config:
        sample_config_file()
        sys.exit()

    if args.file is not None:
        caltable = CalibratorTable.read(args.file)
        caltable.update()
    else:
        if args.config is None:
            raise ValueError("Please specify the config file!")
        caltable = CalibratorTable()
        caltable.from_scans(config_file=args.config)
        caltable.update()

        outfile = args.output
        if outfile is None:
            outfile = args.config.replace(".ini", "_cal.hdf5")

        caltable.write(outfile, overwrite=True)

    sources = args.source
    if args.source is None:
        sources = [standard_string(s) for s in set(caltable['Source'])]

    for s in sources:
        caltable.calculate_src_flux(source=s)
        good = compare_strings(caltable['Source'], s)
        lctable = Table()
        lctable['Time'] = caltable['Time'][good]
        lctable['Flux'] = caltable['Calculated Flux'][good]
        lctable['Flux Err'] = caltable['Calculated Flux Err'][good]
        lctable['Chan'] = caltable['Chan'][good]
        lctable.write(s.replace(' ', '_') + '.csv', overwrite=True)
Example #25
 def _setup(self, table_type):
     self.data = UserDict([('a', Column([1, 3], name='x')),
                           ('b', [2, 4]),
                           ('c', np.array([3, 5], dtype='i8'))])
     assert isinstance(self.data, Mapping)
     assert not isinstance(self.data, dict)
Example #26
    def __init__(self, outfiles, read_mcmc=True, info=None, index=False):
        self.outfiles = outfiles
        #self.legend = info['label']
        #self.imf_type = info['imf_type']
        self.nsample = None
        self.spectra = None

        if read_mcmc:
            self.mcmc = np.loadtxt('{0}.mcmc'.format(self.outfiles))
        results = ascii.read('{0}.sum'.format(self.outfiles))

        with open('{0}.sum'.format(self.outfiles)) as f:
            for line in f:
                if line[0] == '#':
                    if 'Nwalkers' in line:
                        self.nwalkers = float(line.split('=')[1].strip())
                    elif 'Nchain' in line:
                        self.nchain = float(line.split('=')[1].strip())
                    elif 'Nsample' in line:
                        self.nsample = float(line.split('=')[1].strip())

        self.labels = np.array([
            'chi2', 'velz', 'sigma', 'logage', 'zH', 'FeH', 'a', 'C', 'N',
            'Na', 'Mg', 'Si', 'K', 'Ca', 'Ti', 'V', 'Cr', 'Mn', 'Co', 'Ni',
            'Cu', 'Sr', 'Ba', 'Eu', 'Teff', 'IMF1', 'IMF2', 'logfy', 'sigma2',
            'velz2', 'logm7g', 'hotteff', 'loghot', 'fy_logage', 'logemline_h',
            'logemline_oii', 'logemline_oiii', 'logemline_sii', 'logemline_ni',
            'logemline_nii', 'logtrans', 'jitter', 'logsky', 'IMF3', 'IMF4',
            'h3', 'h4', 'ML_v', 'ML_i', 'ML_k', 'MW_v', 'MW_i', 'MW_k'
        ])

        results = Table(results, names=self.labels)
        """
        0:   Mean of the posterior
        1:   Parameter at chi^2 minimum
        2:   1 sigma error
        3-7: 2.5%, 16%, 50%, 84%, 97.5% CLs
        8-9: lower and upper priors
        """

        types = Column([
            'mean', 'chi2', 'error', 'cl25', 'cl16', 'cl50', 'cl84', 'cl98',
            'lo_prior', 'hi_prior'
        ],
                       name='Type')
        results.add_column(types, index=0)
        """
        Create separate table for abundances
        """
        self.xH = results['Type', 'a', 'C', 'N', 'Na', 'Mg', 'Si', 'K', 'Ca',
                          'Ti', 'V', 'Cr', 'Mn', 'Co', 'Ni', 'Cu', 'Sr', 'Ba',
                          'Eu']

        # Creating an empty dict
        # is filled in abundance_correct()
        self.xFe = {}

        self.results = results['Type', 'chi2', 'velz', 'sigma', 'logage', 'zH',
                               'FeH', 'a', 'C', 'N', 'Na', 'Mg', 'Si', 'K',
                               'Ca', 'Ti', 'V', 'Cr', 'Mn', 'Co', 'Ni', 'Cu',
                               'Sr', 'Ba', 'Eu', 'Teff', 'IMF1', 'IMF2',
                               'logfy', 'sigma2', 'velz2', 'logm7g', 'hotteff',
                               'loghot', 'fy_logage', 'logemline_h',
                               'logemline_oii', 'logemline_oiii',
                               'logemline_sii', 'logemline_ni',
                               'logemline_nii', 'logtrans', 'jitter', 'logsky',
                               'IMF3', 'IMF4', 'h3', 'h4', 'ML_v', 'ML_i',
                               'ML_k', 'MW_v', 'MW_i', 'MW_k']
        """
        Read in input data and best fit model

        This isn't going to work correctly if the file
        doesn't exist
        """
        #try:
        m = np.loadtxt('{0}.bestspec'.format(self.outfiles))
        #except:
        #    warning = ('Do not have the *.bestspec file')
        #    warnings.warn(warning)
        data = {}
        data['wave'] = m[:, 0] / (1. +
                                  self.results['velz'][5] * 1e3 / constants.c)
        data['m_flux'] = m[:, 1]  # Model spectrum, normalization applied
        data['d_flux'] = m[:, 2]  # Data spectrum
        data['snr'] = m[:, 3]  # Including jitter and inflated errors
        data['unc'] = 1 / m[:, 3]
        if not index:
            data['poly'] = m[:, 4]  # Polynomial used to create m_flux
        data['residual'] = (m[:, 1] - m[:, 2]) / m[:, 1] * 1e2
        self.spectra = data

        try:
            m = np.loadtxt('{0}.bestspec2'.format(self.outfiles))
            model = {}
            model['wave'] = m[:, 0]
            #model['wave'] = m[:,0]/(1.+self.results['velz'][5]*1e3/constants.c)
            model['flux'] = m[:, 1]
            self.ext_model = model
        except OSError:  # the *.bestspec2 file is optional
            self.ext_model = None
        """
Example #27
    def load_SDSS_DR5(cls, sample='stat'):
        """ Load the DLA from the SDSS-DR5 survey

        (Prochaska & Wolfe 2009, ApJ, 696, 1543)

        Parameters
        ----------
        sample : str, optional
          DLA sample
            stat : Statistical sample
            all : All DLA (NHI >= 20.3)
            all_sys : All systems identified -- Returns an LLSSurvey instead
            nonstat : Non-statistical sample


        Returns
        -------
        dla_survey : DLASurvey

        """
        from .llssurvey import LLSSurvey
        import warnings

        # DLA file
        dla_fil = pyigm_path+'/data/DLA/SDSS_DR5/dr5_alldla.fits.gz'
        print('SDSS-DR5: Loading DLA file {:s}'.format(dla_fil))
        dlas = QTable.read(dla_fil)

        # Rename some columns?
        dlas.rename_column('QSO_RA', 'RA')
        dlas.rename_column('QSO_DEC', 'DEC')

        # Cut on NHI
        if sample != 'all_sys':
            gd_dla = dlas['NHI'] >= 20.3
            dla_survey = cls.from_sfits(dlas[gd_dla])
        else:
            warnings.warn("Loading an LLSSurvey not a DLASurvey")
            dla_survey = LLSSurvey.from_sfits(dlas)

        # Reference label
        dla_survey.ref = 'SDSS-DR5 (PW09)'

        # g(z) file
        qsos_fil = pyigm_path+'/data/DLA/SDSS_DR5/dr5_dlagz_s2n4.fits'
        print('SDSS-DR5: Loading QSOs file {:s}'.format(qsos_fil))
        qsos = QTable.read(qsos_fil)
        qsos.rename_column('Z1', 'Z_START')
        qsos.rename_column('Z2', 'Z_END')
        # Reformat: flatten any multi-dimensional columns so the table is 1-D
        new_cols = []
        for key in qsos.keys():
            if key in ['GZZ', 'GZV']:
                continue
            # New one
            new_cols.append(Column(qsos[key].flatten(), name=key))
        newqsos = QTable(new_cols)
        newqsos['RA'].unit = u.deg
        newqsos['DEC'].unit = u.deg
        dla_survey.sightlines = newqsos

        # All?
        if sample in ['all', 'all_sys']:
            return dla_survey


        # Stat
        # Generate mask
        print('SDSS-DR5: Performing stats')
        mask = dla_stat(dla_survey, newqsos)
        if sample == 'stat':
            dla_survey.mask = mask
        else:
            dla_survey.mask = ~mask
        # Return
        print('SDSS-DR5: Loaded')
        return dla_survey
Example #28
def add_col(arr, tbdata, name):
    ### create column ###
    new_col = Column(arr, name=name)
    ### add to tbdata ###
    tbdata.add_column(new_col)
    return tbdata
Example #29
    data = ascii.read(fnames[i])
    data["std"] = np.nan_to_num(data["std"])

    mask = data["std"] > 0
    x = data["x_center"][mask]
    y = data["std"][mask]

    try:
        # first bin where the std profile drops below 0.1; np.interp needs
        # an increasing abscissa, so reverse the two bracketing points
        j = np.where(y < 0.1)[0][0]
        mass_01 = np.interp(0.1, y[j - 1:j + 1][::-1], x[j - 1:j + 1][::-1])
    except (IndexError, ValueError):  # no crossing, or crossing at first bin
        mass_01 = x[0]
    masses[i] = mass_01
    # print(i, j)

tbl.add_column(Column(data=masses, name="masses"))

####################

clrs = "mrygcbk"[::-1]
xlim = [5E1, 5E5]

plt.figure(figsize=(10, 6))

for i, c, po in zip(np.unique(tbl["dist"]), clrs, [4, 4, 4, 3, 3, 2, 2]):

    m = (tbl["dist"] == i) * (tbl["density"] < 1E6)

    density = tbl["density"][m]
    masses = tbl["masses"][m]
    j = np.argsort(density)
Example #30
def radprof_iraf(image,
                 coords,
                 radius,
                 step=0.1,
                 output='STDOUT',
                 plotfile=None,
                 verbose=False,
                 test=False,
                 pxscale=None):
    iraf.apphot.centerpars.setParam('maxshift', radius)  # search radius
    if test:
        results = iraf.imexamine(image,
                                 image=image,
                                 use_display='no',
                                 logfile='',
                                 defkey='r',
                                 imagecur=coords,
                                 frame=1,
                                 mode='h',
                                 StdoutG=plotfile,
                                 Stdout=1)
        results = [float(x) for x in results[0].split()]
        # prad frad flux bgd mpeak ellip pa mbeta 2dfwhm mfwhm fwhm
        results = OrderedDict(zip(R_COLMAP, results))
        tbl = Table([results])
        tbl = tbl[R_COLMAP]
        tbl['flux'].unit = u.ct
        tbl['bgd'].unit = u.ct
        tbl['mpeak'].unit = u.ct
        tbl['pa'].unit = u.deg
        #tbl['2dfwhm'].unit = u.pix
        tbl['mfwhm'].unit = u.pix
        tbl['fwhm'].unit = u.pix

        # drop the 2dfwhm column; it is not used downstream
        tbl.remove_column('2dfwhm')

        if pxscale:
            #add arcsec columns
            for fwhmcol in tbl.colnames[-2:]:
                tbl.add_column(
                    Column(tbl[fwhmcol] * pxscale,
                           name='%s_s' % fwhmcol,
                           unit=u.arcsec,
                           format='%.3f'),
                    tbl.index_column(fwhmcol) + 1)

        gki = iraf.gkidecode(plotfile, Stdout=1, verbose=True)
        gki = '\n'.join(gki)
        xr, yr, xf, yf = extract_gkidata(gki)
        #plt.scatter(xr,yr)
        #plt.plot(xf,yf)
        #plt.show()

        return tbl, xr, yr, xf, yf

    results = iraf.apphot.radprof(image,
                                  radius=radius,
                                  step=step,
                                  coords=coords,
                                  output=output,
                                  verbose=verbose,
                                  verify=False,
                                  interactive=False,
                                  Stdout=1)

    #remove datalines
    data = []
    pradline = None
    for idx, line in enumerate(results):
        if '*\\' in line:
            line = line.split()[0:3]
            line = [float(x) for x in line]
            data.append((idx, line))
        #remove pradius line descriptors
        elif 'PRADIUS' in line:
            pradline = idx
    idx, data = zip(*data)
    #print data
    #remove from min idx onward
    ridx = np.min(idx)
    del results[ridx:]
    del results[pradline:pradline + 4]
    # remove '\' from last line
    results[-1] = results[-1][0:-1]
    results = '\n'.join(results[1:])

    rtbl = Table.read(results, format='ascii.daophot')
    #print rtbl['PFWHM']/2.0
    ptbl = Table(rows=data, names=('radius', 'intensity', 'tintensity'))
    ptbl['intensity'] *= rtbl['INORM']
    #tbl.pprint()
    #plt.plot(tbl['radius'],tbl['intensity']*ptbl['INORM'])
    return ptbl