Example #1
def test_masked_as_array_with_mixin():
    """Test that as_array() and Table.mask attr work with masked mixin columns"""
    t = Table()
    t['a'] = Time([1, 2], format='cxcsec')
    t['b'] = [3, 4]
    t['c'] = [5, 6] * u.m

    # With no mask, the output should be ndarray
    ta = t.as_array()
    assert isinstance(ta, np.ndarray) and not isinstance(ta, np.ma.MaskedArray)

    # With a mask, output is MaskedArray
    t['a'][1] = np.ma.masked
    ta = t.as_array()
    assert isinstance(ta, np.ma.MaskedArray)
    assert np.all(ta['a'].mask == [False, True])
    assert np.isclose(ta['a'][0].cxcsec, 1.0)
    assert np.all(ta['b'].mask == False)  # noqa
    assert np.all(ta['c'].mask == False)  # noqa

    # Check table ``mask`` property
    tm = t.mask
    assert np.all(tm['a'] == [False, True])
    assert np.all(tm['b'] == False)  # noqa
    assert np.all(tm['c'] == False)  # noqa
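A minimal, self-contained sketch (assuming only numpy and astropy) of the behaviour this test exercises: as_array() returns a plain structured ndarray for an unmasked table, and a numpy MaskedArray once a mask is present.

import numpy as np
from astropy.table import Table

t = Table({'a': [1, 2], 'b': [3.0, 4.0]})
ta = t.as_array()
assert isinstance(ta, np.ndarray) and not isinstance(ta, np.ma.MaskedArray)

tm = Table(t, masked=True)                   # masked variant of the same table
tm['a'].mask = [False, True]
arr = tm.as_array()
assert isinstance(arr, np.ma.MaskedArray)    # the mask is carried over
assert list(arr['a'].mask) == [False, True]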
Example #2
    def test_write_bintable(self):
        '''test write_bintable'''
        from ..io.util import write_bintable, fitsheader
        hdr = fitsheader(dict(A=1, B=2))
        hdr['C'] = ('BLAT', 'FOO')
        data = Table()
        data['X'] = [1, 2, 3]
        data['Y'] = [3, 4, 5]
        write_bintable(self.testfile, data, header=hdr)

        result, newhdr = fits.getdata(self.testfile, header=True)
        self.assertEqual(result.dtype.names, data.dtype.names)
        for colname in data.dtype.names:
            self.assertTrue(np.all(result[colname] == data[colname]),
                            '{} data mismatch'.format(colname))

        self.assertEqual(newhdr.comments['C'], 'FOO')
        for key in hdr.keys():
            self.assertIn(key, newhdr)

        #- repeat with other data types
        os.remove(self.testfile)
        hdr = dict(A=1, B=2)
        data = data.as_array()
        write_bintable(self.testfile, data, header=hdr)
        result, newhdr = fits.getdata(self.testfile, header=True)
        self.assertEqual(result.dtype.names, data.dtype.names)
        for colname in data.dtype.names:
            self.assertTrue(np.all(result[colname] == data[colname]),
                            '{} data mismatch'.format(colname))
        for key in hdr.keys():
            self.assertIn(key, newhdr)
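The second half of the test relies on as_array() keeping the column names in the dtype of the returned structured array, so write_bintable sees the same columns either way; a tiny sketch of that conversion (astropy only):

from astropy.table import Table

data = Table()
data['X'] = [1, 2, 3]
data['Y'] = [3, 4, 5]
arr = data.as_array()
print(arr.dtype.names)   # ('X', 'Y') -- same names as the Table columns
print(arr['X'])          # [1 2 3]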
Example #3
def identify_stars(x,
                   y,
                   wcs,
                   identify_catalog,
                   science_catalog=None,
                   filter=None,
                   limit_angle='2 arcsec'):
    """Identify stars with coordinates x and y in a wcs frame and return
    pertinent parameters from the catalogs."""
    cat = identify_catalog
    ra, dec = wcs_xy2radec(x, y, wcs)

    cat_res = cat.match_objects(ra, dec, filter, limit_angle=limit_angle)
    name = cat_res['id']
    mag = cat_res['flux']
    mag_err = cat_res['flux_error']

    res = Table()
    if science_catalog is not None:
        sci = science_catalog
        sci_res = sci.match_objects(ra, dec, limit_angle=limit_angle)
        res['sci_id'] = process_list(string_fix, sci_res['id'])

    res['cat_id'] = process_list(string_fix, name)
    res['ra'] = ra
    res['dec'] = dec
    res['cat_mag'] = mag
    res['cat_mag_err'] = mag_err

    return res.as_array()
Example #4
    def readMatch(self, fname, pixId):
        names=['ext_shapeHSM_HsmShapeRegauss_e1','ext_shapeHSM_HsmShapeRegauss_e2','base_SdssShape_x','base_SdssShape_y',\
           'modelfit_CModel_instFlux','modelfit_CModel_instFluxErr','ext_shapeHSM_HsmShapeRegauss_resolution']
        pltDir = '../../galSim-HSC/s19/s19-1/anaCat_newS19Mask_fdeltacut/plot/optimize_weight/'

        # Match
        pix_scale = 0.168 / 3600.
        cosmo252 = imgSimutil.cosmoHSTGal('252')
        cosmo252E = imgSimutil.cosmoHSTGal('252E')

        info = cosmo252.hpInfo[cosmo252.hpInfo['pix'] == pixId]
        nx = int(info['dra'] / pix_scale)
        ny = int(info['ddec'] / pix_scale)

        if 'CosmoE' in fname:
            hstcat = pyfits.getdata('hstcatE-dis4.fits')
        elif 'CosmoR' in fname:
            hstcat = pyfits.getdata('hstcatR-dis4.fits')

        msk = (hstcat['xI'] > 32) & (hstcat['yI'] > 32) & (
            hstcat['xI'] < nx - 32) & (hstcat['yI'] < ny - 32)
        hstcat = hstcat[msk]
        xyRef = np.vstack([hstcat['xI'], hstcat['yI']]).T
        tree = scipy.spatial.cKDTree(xyRef)
        del msk, xyRef

        dd = self.readFits(fname)
        xyDat = np.vstack([dd['base_SdssShape_x'], dd['base_SdssShape_y']]).T
        dis, inds = tree.query(xyDat, k=1)
        mask = (dis <= (0.85 / 0.168))
        dis = dis[mask]
        inds = inds[mask]
        dd = dd[mask]

        wlmsk   =   (catutil.get_imag(dd) < 24.5) & \
            (catutil.get_abs_ellip(dd) <= 2.)   & \
            (catutil.get_res(dd) >= 0.3)        & \
            (catutil.get_snr(dd) >= 10.)        & \
            (catutil.get_imag_A10(dd)<25.5)     & \
            (catutil.get_logb(dd)<= -0.38)
        dd = dd[wlmsk]
        inds = inds[wlmsk]
        matcat = hstcat[inds]
        del mask, inds, dis
        dd = dd[names]

        dd = Table(dd.as_array(), names=names)
        sigmae = catutil.get_sigma_e_model(dd, pltDir)
        erms = catutil.get_erms_model(dd, pltDir)
        dd['i_hsmshaperegauss_derived_weight'] = 1. / (sigmae**2 + erms**2)
        #dd['i_hsmshaperegauss_derived_sigma_e']=   sigmae
        #dd['i_hsmshaperegauss_derived_rms_e']  =   erms
        dd['zphot'] = matcat['zphot']
        dd['mag_auto'] = matcat['mag_auto']
        del xyDat, wlmsk, matcat, hstcat, sigmae, erms
        gc.collect()
        return dd
Example #5
def test_read_returns_image(tmpdir):
    # Test that CCDData.read returns an image when reading a FITS file
    # containing a table and an image, in that order.
    tbl = Table(np.ones(10).reshape(5, 2))
    img = np.ones((5, 5))
    hdul = fits.HDUList(hdus=[fits.PrimaryHDU(), fits.TableHDU(tbl.as_array()),
                              fits.ImageHDU(img)])
    filename = tmpdir.join('table_image.fits').strpath
    hdul.writeto(filename)
    ccd = CCDData.read(filename, unit='adu')
    # Expecting to get (5, 5), the size of the image
    assert ccd.data.shape == (5, 5)
Example #7
def convert_to_hdulist(ds):
    """ transform TheCannon dataset into fits HDU list """
    print('@Bo Zhang: ---------------------------------------------------')
    print('@Bo Zhang: transforming TheCannon data cubes into HDU list ...')
    print('@Bo Zhang: ---------------------------------------------------')

    # initialize data list
    data_list = [
        ds.wl, ds.ranges, ds.contmask * 1, ds.tr_ID, ds.tr_SNR, ds.tr_flux,
        ds.tr_ivar, ds.tr_label, ds.test_ID, ds.test_SNR, ds.test_flux,
        ds.test_ivar
    ]
    name_list = [
        'wl', 'ranges', 'contmask', 'tr_ID', 'tr_SNR', 'tr_flux', 'tr_ivar',
        'tr_label', 'test_ID', 'test_SNR', 'test_flux', 'test_ivar'
    ]
    hdu_format_list_rw = [
        'image', 'image', 'image', 'table', 'image', 'image', 'image', 'image',
        'table', 'image', 'image', 'image'
    ]

    # construct Primary header
    header = fits.Header()
    header['author'] = 'Bo Zhang (@NAOC)'
    header['version'] = 'v1.0'
    header['last_modified'] = 'Sat 11 Jun 2016'
    header['data'] = 'TheCannon dataset object'

    # initialize HDU list
    print('@Bo Zhang: initializing the HDU list ...')
    hl = [fits.hdu.PrimaryHDU(header=header)]

    # construct HDU list
    assert len(data_list) == len(name_list)
    n_hdus = len(data_list)
    for i in range(n_hdus):
        print('@Bo Zhang: transforming HDU [%d/%d]: %s ...' %
              (i + 1, n_hdus, name_list[i]))
        if hdu_format_list_rw[i] == 'table':
            data = Table(data=data_list[i].reshape((-1, 1)),
                         names=[name_list[i]])
            hl.append(fits.BinTableHDU(data.as_array()))
        else:
            hl.append(
                fits.ImageHDU(data=np.array(data_list[i]), name=name_list[i]))

    print('@Bo Zhang: ---------------------------------------------------')
    return fits.HDUList(hl)
Example #8
def match_pairs(x, y, dx, dy, tolerance=1.0, logger=logger):
    """Match the pairs of ordinary/extraordinary points (x, y)."""
    kd = cKDTree(list(zip(x, y)))

    px = np.array(x-dx)
    py = np.array(y-dy)

    d, ind = kd.query(list(zip(px, py)), k=1, distance_upper_bound=tolerance,
                      n_jobs=-1)

    o = np.arange(len(x))[np.where(d <= tolerance)]
    e = np.array(ind[np.where(d <= tolerance)])
    result = Table()
    result['o'] = o
    result['e'] = e

    return result.as_array()
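A hypothetical usage sketch with made-up coordinates (it assumes the match_pairs defined above is in scope): three ordinary/extraordinary pairs separated by dx=10, dy=0 pixels.

import numpy as np

x = np.array([100., 200., 300., 110., 210., 310.])
y = np.array([50., 50., 50., 50., 50., 50.])
pairs = match_pairs(x, y, dx=10.0, dy=0.0, tolerance=1.0)
print(pairs['o'])   # indices of the shifted points: [3 4 5]
print(pairs['e'])   # indices of their matched counterparts: [0 1 2]

Note that recent SciPy versions renamed the n_jobs keyword of cKDTree.query to workers, so the function above may need that small change depending on the installed SciPy.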
Example #9
def ZTF_CXC_xmatch(archive, ztfobjects, tcresults, n_results):

    # Filtering ztfobjects with xmatch results
    tcresults_byoid = tcresults.group_by('oid')
    keys = tcresults_byoid.groups.keys['oid'].data
    ztfobjects.add_index('oid')
    maskedztf = Table(ztfobjects.loc[keys])

    # Stipulation if there is only one ZTF object that's been matched with CXC data
    if len(keys) == 1:
        maskedztf = Table(np.repeat(maskedztf, n_results[0]))
        n_results = np.repeat(n_results, n_results[0])

    maskedztf.add_column(n_results, name='n_results')

    # Initializing new data columns for tap service and cone search results
    prefix = {'CDA': 'cda_%s', 'CSC': 'csc_%s'}[archive]
    for colname in tcresults.colnames:
        maskedztf[prefix % (colname)] = np.full(len(maskedztf), None)

    # Formatting filtered ztf object array
    mztf = maskedztf.as_array()
    ztf_fr = []

    if len(keys) > 1:
        # Adding extra rows, according to the number of cone search results each ZTF object has
        ztf_fr = mztf
        ztf_nres = n_results[n_results != 0]
        ztf_fr = Table(np.repeat(ztf_fr, n_results))

    else:
        ztf_fr = Table(mztf)

    # Sort both ztfobjects table and tap service/cone search results table by ZTF oid
    # so that we can easily assign columns
    if len(tcresults) > 1:
        ztf_fr.sort('oid')
        tcresults.sort('oid')

    # Filling new data columns in ztfobjects table
    for colname in tcresults.colnames:
        ztf_fr[prefix % (colname)] = tcresults[colname]

    ztf_fr.remove_column(prefix % ('oid'))
    return ztf_fr
Example #10
    def get_stars(self):
        """
        Returns a list of IStar objects from the HDUL's TableHDU information
        """
        if self.file_name is None:
            return []
        with fits.open(self.file_name) as hdul:
            table = Table(hdul[get_table_hdu_number(hdul)].data)
            star_data = table.as_array()
            stars = []
            for i in range(len(table)):
                stars.append(
                    IStar(
                        star_name=get_star_attribute(star_data[i], 0),
                        x=get_star_attribute(star_data[i], 1),
                        y=get_star_attribute(star_data[i], 2),
                        magnitude=get_star_attribute(star_data[i], 5),
                        counts=get_star_attribute(star_data[i], 7),
                    ))
        return stars
Example #11
def test_write_create_dataset_kwargs(tmpdir):

    test_file = str(tmpdir.join('test.hdf5'))
    the_path = 'the_table'

    import h5py
    with h5py.File(test_file, 'w') as output_file:
        t1 = Table()
        t1.add_column(Column(name='a', data=[1, 2, 3]))
        t1.write(output_file, path=the_path, maxshape=(None, ))

    # A roundabout way of checking this, but the table created above should be
    # resizable if the kwarg was passed through successfully
    t2 = Table()
    t2.add_column(Column(name='a', data=[4, 5]))
    with h5py.File(test_file, 'a') as output_file:
        output_file[the_path].resize((len(t1) + len(t2), ))
        output_file[the_path][len(t1):] = t2.as_array()

    t3 = Table.read(test_file, path='the_table')
    assert np.all(t3['a'] == [1, 2, 3, 4, 5])
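A condensed sketch of why as_array() is used for the raw h5py assignment above: the structured array it returns matches the compound dtype of the dataset that Table.write created, so extra rows can be assigned directly (assumes h5py and astropy; the file name is hypothetical).

import h5py
from astropy.table import Table

t1 = Table({'a': [1, 2, 3]})
with h5py.File('sketch.hdf5', 'w') as f:
    dset = f.create_dataset('the_table', data=t1.as_array(), maxshape=(None, ))
    dset.resize((5, ))
    dset[3:] = Table({'a': [4, 5]}).as_array()

assert list(Table.read('sketch.hdf5', path='the_table')['a']) == [1, 2, 3, 4, 5]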
Example #12
    def test_data_in_other_hdu(self, tmpdir):
        tbl = Table(np.ones(10).reshape(5, 2))
        data = 100 * np.ones(self.shape)
        hdul = fits.HDUList(hdus=[
            fits.PrimaryHDU(),
            fits.TableHDU(tbl.as_array()),
            fits.ImageHDU(data)
        ])
        fname = tmpdir.join('test_table.fits').strpath
        hdul.writeto(fname)

        logs = []
        lh = log_to_list(logger, logs, full_record=True)
        f = _extract_fits(fname)
        assert_equal(f['data'], 100 * np.ones(self.shape))
        assert_equal(f['unit'], None)

        # ensure log emitting
        logs = [i for i in logs if i.message == 'First hdu with image data: 2']
        assert_equal(len(logs), 1)
        assert_equal(logs[0].levelname, 'INFO')

        logger.removeHandler(lh)
Example #13
    def to_fits_file(self, path):
        """
        Write the visibilities to a fits file.

        Parameters
        ----------
        path : str
            Path to the output FITS file.

        Returns
        -------

        """
        primary_hdu = fits.PrimaryHDU()
        primary_hdu.header['source'] = 'xrayvision'
        vis_table = Table([
            self.uv.value.T, self.vis,
            np.repeat([self.xyoffset.value], self.vis.shape, axis=0),
            np.repeat([self.pixel_size.value], self.vis.shape, axis=0)
        ],
                          names=('uv', 'vis', 'xyoffset', 'pixel_size'))

        vis_hdu = fits.BinTableHDU.from_columns(
            fits.ColDefs(vis_table.as_array()))
        if self.uv.unit.bases == self.xyoffset.unit.bases == self.pixel_size.unit.bases:
            vis_hdu.header.set('unit', str(self.uv.unit.bases[0]))
        else:
            raise ValueError(
                f'Units must have the same base unit  uv: {self.uv.unit}, xyoffset: '
                f'{self.xyoffset.unit}, pixel_size: {self.pixel_size.unit}')

        hdul = fits.HDUList([primary_hdu, vis_hdu])
        try:
            hdul.writeto(path)
        except Exception as e:
            raise e
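A minimal round-trip sketch of the as_array() -> BinTableHDU step (astropy only; the output path is hypothetical):

import numpy as np
from astropy.io import fits
from astropy.table import Table

vis_table = Table({'uv': np.array([[0.1, 0.2], [0.3, 0.4]]),
                   'vis': np.array([1.0, 2.5])})
vis_hdu = fits.BinTableHDU(vis_table.as_array())
fits.HDUList([fits.PrimaryHDU(), vis_hdu]).writeto('vis_sketch.fits', overwrite=True)

back = Table.read('vis_sketch.fits')
print(back.colnames)       # ['uv', 'vis']
print(back['uv'].shape)    # (2, 2) -- vector columns survive the round trip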
Example #14
def convert_to_hdulist(ds):
    """ transform TheCannon dataset into fits HDU list """
    print('@Bo Zhang: ---------------------------------------------------')
    print('@Bo Zhang: transforming TheCannon data cubes into HDU list ...')
    print('@Bo Zhang: ---------------------------------------------------')

    # initialize data list
    data_list = [ds.wl,
                 ds.ranges,
                 ds.contmask*1,
                 ds.tr_ID,
                 ds.tr_SNR,
                 ds.tr_flux,
                 ds.tr_ivar,
                 ds.tr_label,
                 ds.test_ID,
                 ds.test_SNR,
                 ds.test_flux,
                 ds.test_ivar]
    name_list = ['wl',
                 'ranges',
                 'contmask',
                 'tr_ID',
                 'tr_SNR',
                 'tr_flux',
                 'tr_ivar',
                 'tr_label',
                 'test_ID',
                 'test_SNR',
                 'test_flux',
                 'test_ivar']
    hdu_format_list_rw = ['image',
                          'image',
                          'image',
                          'table',
                          'image',
                          'image',
                          'image',
                          'image',
                          'table',
                          'image',
                          'image',
                          'image']

    # construct Primary header
    header = fits.Header()
    header['author'] = 'Bo Zhang (@NAOC)'
    header['version'] = 'v1.0'
    header['last_modified'] = 'Sat 11 Jun 2016'
    header['data'] = 'TheCannon dataset object'

    # initialize HDU list
    print('@Bo Zhang: initializing the HDU list ...')
    hl = [fits.hdu.PrimaryHDU(header=header)]

    # construct HDU list
    assert len(data_list) == len(name_list)
    n_hdus = len(data_list)
    for i in range(n_hdus):
        print('@Bo Zhang: transforming HDU [%d/%d]: %s ...'
              % (i+1, n_hdus, name_list[i]))
        if hdu_format_list_rw[i] == 'table':
            data = Table(data=data_list[i].reshape((-1, 1)),
                         names=[name_list[i]])
            hl.append(fits.BinTableHDU(data.as_array()))
        else:
            hl.append(fits.ImageHDU(data=np.array(data_list[i]),
                                    name=name_list[i]))

    print('@Bo Zhang: ---------------------------------------------------')
    return fits.HDUList(hl)
Example #15
    def inCCARegionTable(self,cfg_par):
 
        lineInfo = tP.openLineList(cfg_par)
        lineThresh = float(lineInfo['SNThresh'][0])


        f = fits.open(cfg_par['general']['dataCubeName'])
        momHead = f[0].header
        f.close()

        hdul = fits.open(cfg_par['general']['outTableName'])

        anc = hdul['ancels'+cfg_par['gFit']['modName']].data
        bins = hdul['BININFO'].data
        
        if not cfg_par['ancillary']['coldGas']['enable']== True:
            
            linesG1 = hdul['LineRes_G1'].data
            if cfg_par['gFit']['modName'] == 'BF':
                cfg_par['gFit']['modName'] = 'g2'
            residuals = hdul['Residuals_'+cfg_par['gFit']['modName']].data
            cfg_par['gFit']['modName'] = 'BF'
            x=anc['logCentroid_'+cfg_par['ancillary']['coldGas']['Name']]
            y=anc['logDispIntr_'+cfg_par['ancillary']['coldGas']['Name']]
            
        else:
            x=anc['logCentroid_'+cfg_par['ancillary']['coldGas']['Name']]
            y=anc['logSigma_'+cfg_par['ancillary']['coldGas']['Name']]

        if cfg_par['ancillary']['theoreticalCCA'] == 'Ensemble':
            Mean_sigmav = 2.13 
            RMS_sigmav  = 0.13 
            Mean_vshift = 1.59 
            RMS_vshift  = 0.37
            theta = -179.88 #covariance angle (contours inclined)
            ellColor='purple'
        if cfg_par['ancillary']['theoreticalCCA'] == 'Pencil':
            Mean_sigmav = 1.65 
            RMS_sigmav  = 0.41
            Mean_vshift = 2.0
            RMS_vshift  = 0.47
            theta = 165.16 #covariance angle (contours inclined)
            ellColor='darkseagreen'

        rmsToFWHM = 2.*np.sqrt(2.*np.log(2))
        ellWidth = rmsToFWHM*RMS_vshift
        ellHeight = rmsToFWHM*RMS_sigmav

        cos_angle = np.cos(np.radians(180.-theta))
        sin_angle = np.sin(np.radians(180.-theta))

        xc = x - Mean_vshift
        yc = y - Mean_sigmav

        xct = xc * cos_angle - yc * sin_angle
        yct = xc * sin_angle + yc * cos_angle 

        rad_cc = (xct**2/(ellWidth/2.)**2) + (yct**2/(ellHeight/2.)**2)

        CCAvec = np.empty(len(anc['BIN_ID']))*np.nan
        
        CCAMap = np.zeros([momHead['NAXIS2'],momHead['NAXIS1']])*np.nan
        
        if cfg_par['ancillary']['plotRotation'] == True:

            rotModCol = anc['RotMod']

            RotMap = np.zeros([momHead['NAXIS2'],momHead['NAXIS1']])*np.nan
            RotMapCCA = np.zeros([momHead['NAXIS2'],momHead['NAXIS1']])*np.nan

        for i in range(0,len(rad_cc)):
            
            match_bin = np.where(bins['BIN_ID']==anc['BIN_ID'][i])[0]

            for index in match_bin:

                if not cfg_par['ancillary']['coldGas']['enable']== True:

                    thresHold = residuals['SN_NII6583'][index]
                    sigmaThresh = linesG1['g1_SigIntr_NII6583'][index]

                    #if thresHold >= lineThresh and sigmaThresh < cfg_par['moments']['sigmaThresh']:
                    if thresHold >= lineThresh:
                    
                        if rad_cc[i] <= 1.:
                            # point in ellipse
                            CCAvec[i] = 1 
                        else:
                            # point not in ellipse
                            CCAvec[i] = 0 

                    CCAMap[int(bins['PixY'][index]),int(bins['PixX'][index])] = CCAvec[i]
                    RotMap[int(bins['PixY'][index]),int(bins['PixX'][index])] = rotModCol[index]
                else:

                    if not np.isnan(x[i]) and not np.isnan(y[i]):
                        if rad_cc[i] <= 1.:
                            # point in ellipse
                            CCAvec[i] = 1 
                    
                        else:
                            # point not in ellipse
                            CCAvec[i] = 0 

                    CCAMap[int(bins['PixY'][index]),int(bins['PixX'][index])] = CCAvec[i]
                    RotMap[int(bins['PixY'][index]),int(bins['PixX'][index])] = rotModCol[index]


        if 'CUNIT3' in momHead:
            del momHead['CUNIT3']
        if 'CTYPE3' in momHead:
            del momHead['CTYPE3']
        if 'CDELT3' in momHead:
            del momHead['CDELT3']
        if 'CRVAL3' in momHead:  
            del momHead['CRVAL3']
        if 'CRPIX3' in momHead:
            del momHead['CRPIX3'] 
        if 'NAXIS3' in momHead:
            del momHead['NAXIS3']
        if 'CRDER3' in momHead:
            del momHead['CRDER3']

        Head = momHead.copy()
        Head['WCSAXES'] = 2
        Head['SPECSYS'] = 'topocent'
        Head['BUNIT'] = 'Jy'

        fits.writeto(cfg_par['general']['momModDir']+'ccaMap-'+cfg_par['ancillary']['coldGas']['Name']+'.fits',CCAMap,Head,overwrite=True)
        if cfg_par['ancillary']['plotRotation'] == True:
            fits.writeto(cfg_par['general']['momModDir']+'ccaMap-'+cfg_par['ancillary']['coldGas']['Name']+'rot.fits',RotMap,Head,overwrite=True)
            fits.writeto(cfg_par['general']['momModDir']+'ccaMap-'+cfg_par['ancillary']['coldGas']['Name']+'rotCCA.fits',RotMapCCA,Head,overwrite=True)

        mPl.momAncPlot(cfg_par, cfg_par['general']['momModDir']+'ccaMap-'+cfg_par['ancillary']['coldGas']['Name']+'.fits',
            cfg_par['ancillary']['coldGas']['Name'],cfg_par['ancillary']['coldGas']['Name'],cfg_par['ancillary']['coldGas']['Name'],'ancillary')

        mPl.momAncPlot(cfg_par, cfg_par['general']['momModDir']+'ccaMap-'+cfg_par['ancillary']['coldGas']['Name']+'rot.fits',
            cfg_par['ancillary']['coldGas']['Name'],cfg_par['ancillary']['coldGas']['Name'],cfg_par['ancillary']['coldGas']['Name'],'ancillary')

        mPl.momAncPlot(cfg_par, cfg_par['general']['momModDir']+'ccaMap-'+cfg_par['ancillary']['coldGas']['Name']+'rotCCA.fits',
            cfg_par['ancillary']['coldGas']['Name'],cfg_par['ancillary']['coldGas']['Name'],cfg_par['ancillary']['coldGas']['Name'],'ancillary')


        t=Table(anc)
 
        if 'CCAIN' not in anc.dtype.names: 
            t.add_column(Column(CCAvec,name='CCAIN'))
        else:
            t.replace_column('CCAIN',Column(CCAvec,name='CCAIN'))        
        
        hdul['Ancels'+cfg_par['gFit']['modName']] = fits.BinTableHDU(t.as_array(),name='Ancels'+cfg_par['gFit']['modName'])

        hdul.writeto(cfg_par['general']['outTableName'],overwrite=True)

        return
Example #16
def inject_psf(image, mag, coord, psf=None, seed=None):
    """Realize the DES_PSFEx PSF model `psf` at location `coord` on
    ZTF science image `image` (image path) with magnitude `mag` in
    the AB system, fluctuated by Poisson noise.
    """

    # initialize the random number generator
    import galsim
    rng = galsim.BaseDeviate(seed)

    # handle both scalar and vector inputs
    mag = np.atleast_1d(mag)
    if coord.isscalar:
        coord = coord.reshape([1])

    with fits.open(image, mode='update') as hdul:

        # read in the WCS
        header = hdul[0].header
        wcs = WCS(header=header)

        # measure the PSF using PSFEx if not already specified
        if psf is None:
            psf = measure_psf(image)

        # load the image into galsim
        gimage = galsim.fits.read(hdu_list=hdul)

        # convert the world coordinates to pixel coordinates
        ipos = wcs.all_world2pix([[pos.ra.deg, pos.dec.deg] for pos in coord],
                                 1)

        for m, pos in zip(mag, ipos):

            # calculate the measured flux of the object
            flux = 10**(-0.4 * (m - header['MAGZP']))

            image_pos = galsim.PositionD(*pos)

            # store the center of the nearest integer pixel
            iimage_pos = galsim.PositionI(*tuple(map(round, pos)))

            ix, iy = iimage_pos.x, iimage_pos.y

            # calculate the offset between the stamp center
            # and the profile center
            offset = image_pos - iimage_pos

            # create an output stamp onto which to draw the fluctuated
            # psf

            bounds = galsim.BoundsI(ix - NPIX, ix + NPIX, iy - NPIX, iy + NPIX)

            # check that there is at least some overlap between the
            bounds = bounds & gimage.bounds
            if not bounds.isDefined():
                raise RuntimeError('No overlap between PSF stamp and image. '
                                   'Is the object coordinate contained by the '
                                   'image?')

            # get the noise
            noise = galsim.PoissonNoise(rng)

            # realize the psf at the coordinates
            realization = psf.getPSF(image_pos).withFlux(flux)

            # get the local wcs
            lwcs = psf.getLocalWCS(image_pos)

            # draw the image
            imout = realization.drawImage(wcs=lwcs,
                                          offset=offset,
                                          nx=NPIX * 2 + 1,
                                          ny=NPIX * 2 + 1)

            # add the noise
            imout.addNoise(noise)

            # shift the image to the right spot
            imout.setCenter(iimage_pos)

            # add the photons
            gimage[bounds] = gimage[bounds] + imout[bounds]

        # save it as a new hdu
        galsim.fits.write(gimage, hdu_list=hdul)

        # propagate original WCS to output extension
        wcskeys = wcs.to_header(relax=True)
        hdul[-1].header.update(wcskeys)

        # add a record of the fakes as a bintable
        record = {
            'fake_mag': mag,
            'fake_ra': coord.ra.deg,
            'fake_dec': coord.dec.deg,
            'fake_x': ipos[:, 0],
            'fake_y': ipos[:, 1]
        }

        # save the bintable as an extension
        table = Table(record)
        nhdu = fits.BinTableHDU(table.as_array())
        hdul.append(nhdu)
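The bookkeeping step at the end, shown in isolation (hypothetical fake-source values; astropy only): a dict of equal-length arrays becomes a Table, and its as_array() output becomes an extra BinTableHDU.

import numpy as np
from astropy.io import fits
from astropy.table import Table

record = {'fake_mag': np.array([20.5, 21.0]),
          'fake_ra': np.array([150.11, 150.23]),
          'fake_dec': np.array([2.21, 2.35]),
          'fake_x': np.array([101.3, 220.7]),
          'fake_y': np.array([330.9, 48.2])}

hdul = fits.HDUList([fits.PrimaryHDU()])
hdul.append(fits.BinTableHDU(Table(record).as_array()))
print(hdul[1].columns.names)   # ['fake_mag', 'fake_ra', 'fake_dec', 'fake_x', 'fake_y']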
Example #17
class BasicCatalog(object):
    def __init__(self,src_names,lon,lat,significance,unit='deg',frame='FK5',_selected=None,_table=None):

        self.selected = np.ones(len(src_names), dtype=bool)

        if _selected is not None:
            self.selected[:] = False
            self.selected[_selected] = True

        self._sc = SkyCoord(lon,lat,frame=frame, unit=unit)

        self.lat_name, self.lon_name=self.get_coord_names(self.sc)

        meta={'FRAME':frame}
        meta['COORD_UNIT']=unit
        meta['LON_NAME']=self.lon_name
        meta['LAT_NAME']=self.lat_name

        if _table is None:
            self._table = Table([np.arange(len(src_names)),src_names, significance, lon, lat], names=['meta_ID','src_names', 'significance', self.lon_name, self.lat_name],meta=meta,masked=True)
        else:
            self._table=Table(_table.as_array(),names=_table.colnames,meta=meta,masked=True)

    def select_IDs(self,ids):
        self.unselect_all()
        self.selected[ids]=True


    def select_all(self):
        self.selected[::]=True

    def unselect_all(self):
        self.selected[::]=False

    def get_coord_names(self,sc):
        inv_map = {v: k for k, v in sc.representation_component_names.items()}

        _lat_name = inv_map['lat']
        _lon_name = inv_map['lon']

        return _lat_name,_lon_name


    @property
    def table(self):
        return self._table[self.selected]


    @property
    def sc(self):
        return self._sc[self.selected]

    @property
    def length(self):
        return self._table.as_array().shape[0]

    @property
    def ra(self):
        return self.sc.fk5.ra

    @property
    def dec(self):
        return self.sc.fk5.dec

    @property
    def l(self):
        return self.sc.galactic.l

    @property
    def b(self):
        return self.sc.galactic.b

    @property
    def name(self):
        return self.table['src_names']

    @property
    def significance(self):
        return self.table['significance']

    @property
    def lat(self):
        return self.table[ self.lat_name]

    @property
    def lon(self):
        return self.table[ self.lon_name]

    def add_column(self,data=None,name=None,dtype=None):

        if data is None:
            data=np.zeros(self.table.as_array().shape[0])
        self._table.add_column(Column(data=data,name=name,dtype=dtype))

    def get_dictionary(self ):


        column_lists=[self.table[name].tolist() for name in self.table.colnames]
        for ID,_col in enumerate(column_lists):
            column_lists[ID] = [x if str(x)!='nan' else None for x in _col]

        return dict(cat_frame=self.table.meta['FRAME'],
                    cat_coord_units=self.table.meta['COORD_UNIT'],
                    cat_column_list=column_lists,
                    cat_column_names=self.table.colnames,
                    cat_column_descr=self.table.dtype.descr,
                    cat_lat_name=self.lat_name,
                    cat_lon_name=self.lon_name)



    def write_ds9_region(self,name,overwrite=True):
        ra=self.sc.fk5.ra
        dec=self.sc.fk5.dec
        src_names=self.name

        with open(name,'w') as f:
            for r,d,src_name in zip(ra,dec,src_names):

                s=u'''fk5; point %f %f #point = x  text = {%s} \n'''%(r.deg,d.deg,src_name)
                f.write(s)


    #
    #user_catalog = BasicCatalog(src_names, lon, lat, significance, _table=t, unit=unit, frame=frame)

    def decode(self,enc_table):
        pass


    def encode(self,):
        _table=numpy_encode(self.table.as_array())
        _meta=dumps(self.table.meta)

        return dumps(_table,_meta)


    def write(self,name,format='fits',overwrite=True):
        self._table.write(name,format=format,overwrite=overwrite)

    @classmethod
    def from_ecsv_file(cls, file_name):
        return cls.from_table(Table.read(file_name, format='ascii.ecsv'))


    @classmethod
    def from_fits_file(cls,file_name):
        return cls.from_table(Table.read(file_name,format='fits'))

    @classmethod
    def from_file(cls,file_name):
        format_list=['ascii.ecsv','fits']
        cat=None
        for f in format_list:
            try:
                cat= cls.from_table(Table.read(file_name,format=f))
            except:
                pass

        if cat is None:
            raise RuntimeError('file format for catalog not valid')
        return cat

    @classmethod
    def from_table(cls,table):
        try:
            src_names=table['src_names']
            significance=table['significance']
            frame = table.meta['FRAME']
            lon=table[table.meta['LON_NAME']]
            lat =table[table.meta['LAT_NAME']]
            unit = table.meta['COORD_UNIT']
            cat= cls(src_names,lon,lat,significance,_table=table,unit=unit,frame=frame)
        except:
            raise RuntimeError('Table in fits file is not valid to build Catalog')

        return  cat
Example #18
def main(argv=None):
    """ Main Function """
    # Call initial parser from init_utils
    parser = ap.ArgumentParser(description="""Create HDF5 Survey file.""",
                               add_help=True)

    parser.add_argument(
        "-r",
        "--rootdir",
        help="""Root Directory for Reductions""",
        type=str,
        default="/work/03946/hetdex/maverick/red1/reductions/",
    )

    parser.add_argument(
        "-sdir",
        "--shotdir",
        help="""Directory for shot H5 files to ingest""",
        type=str,
        default="/scratch/03946/hetdex/hdr3/reduction/data",
    )

    parser.add_argument(
        "-sl",
        "--shotlist",
        help="""Text file of DATE OBS list""",
        type=str,
        default="dex.hdr2.shotlist",
    )

    parser.add_argument(
        "-ad",
        "--astrometry_dir",
        help="""Directory for Shifts""",
        type=str,
        default="/data/00115/gebhardt/vdrp/shifts/",
    )

    parser.add_argument(
        "-of",
        "--outfilename",
        type=str,
        help="""Relative or absolute path for output HDF5 file.""",
        default=None,
    )

    parser.add_argument(
        "-flim",
        "--flim",
        help="""Path to flim look up table""",
        type=str,
        default="/data/05350/ecooper/hdr2.1/survey/average_one_sigma.txt",
    )

    parser.add_argument("-survey", "--survey", type=str, default="hdr2.1")

    args = parser.parse_args(argv)

    print(args)

    args.log = setup_logging()

    fileh = tb.open_file(args.outfilename,
                         mode="w",
                         title=args.survey.upper() + "Survey file ")

    shotlist = Table.read(args.shotlist,
                          format="ascii.no_header",
                          names=["date", "obs"])

    survey = Table()

    for shotrow in shotlist:
        datevshot = str(shotrow["date"]) + "v" + str(shotrow["obs"]).zfill(3)

        if True:
            args.log.info('Ingesting ' + datevshot)
            file_obs = tb.open_file(op.join(args.shotdir, datevshot + ".h5"),
                                    "r")

            shottable = Table(file_obs.root.Shot.read())

            # updating field in survey file
            shottable['field'] = define_field(str(shottable['objid'][0]))

            survey = vstack([survey, shottable])
            file_obs.close()
        else:  #except:
            args.log.error("Could not ingest %s" % datevshot)

    tableMain = fileh.create_table(fileh.root, "Survey", obj=survey.as_array())

    tableMain.flush()
    fileh.close()
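The PyTables call at the end accepts the structured array from as_array() directly as both description and data; a minimal sketch (assumes PyTables and astropy; the file name and columns are hypothetical):

import numpy as np
import tables as tb
from astropy.table import Table

survey = Table({'shotid': np.array([20200101007, 20200102011]),
                'ra': np.array([150.1, 150.2])})
with tb.open_file('survey_sketch.h5', 'w') as fileh:
    tab = fileh.create_table(fileh.root, 'Survey', obj=survey.as_array())
    tab.flush()
    print(tab.colnames)   # ['shotid', 'ra']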
Example #19
def gather_nightwatch_qa(night, verbose=False, overwrite=False):
    """Read and stack all the nightwatch QA files for a given night.

    """
    import json
    import astropy.table
    from astropy.table import Table

    qadir = os.path.join(outdir, 'nightwatch')
    if not os.path.isdir(qadir):
        os.makedirs(qadir, exist_ok=True)
    stackwatchfile = os.path.join(qadir,
                                  'qa-nightwatch-{}.fits'.format(str(night)))
    fiberassignmapfile = os.path.join(
        qadir, 'fiberassignmap-{}.fits'.format(str(night)))

    if os.path.isfile(stackwatchfile) and not overwrite:
        print('Reading {}'.format(stackwatchfile))
        data = Table(fitsio.read(stackwatchfile, 'PER_CAMFIBER'))
        fiberassignmap = Table(fitsio.read(fiberassignmapfile))
    else:
        #print('Reading the focal plane model.')
        #fp = desimodel.io.load_focalplane()[0]
        #fp = fp['PETAL', 'FIBER', 'OFFSET_X', 'OFFSET_Y']

        nightdir = os.path.join(nightwatchdir, str(night))
        allexpiddir = glob(os.path.join(nightdir, '????????'))

        data = []
        fiberassignmap = Table(names=('NIGHT', 'EXPID', 'TILEID',
                                      'FIBERASSIGNFILE'),
                               dtype=('U8', 'U8', 'i4', 'U32'))
        for expiddir in allexpiddir:
            expid = os.path.basename(expiddir)
            qafile = os.path.join(expiddir, 'qa-{}.fits'.format(expid))

            qaFITS = fitsio.FITS(qafile)
            if 'PER_CAMFIBER' in qaFITS:
                if verbose:
                    print('Reading {}'.format(qafile))
                qa = Table(qaFITS['PER_CAMFIBER'].read())

            # Hack! Figure out the mapping between EXPID and FIBERMAP using the request-EXPID.json file.
            requestfile = os.path.join(rawdata_dir, str(night), expid,
                                       'request-{}.json'.format(expid))
            if not os.path.isfile(requestfile):
                print('Missing {}'.format(requestfile))
                continue
            with open(requestfile) as ff:
                req = json.load(ff)
            if 'PASSTHRU' in req.keys():
                if type(req['PASSTHRU']) is dict:
                    tileid = req['PASSTHRU']['TILEID']
                    #tileid = int(req['PASSTHRU'].split(':')[3].split(',')[0])
                else:
                    indx = req['PASSTHRU'].index('TILEID')
                    tileid = req['PASSTHRU'][indx:]
                    tileid = int(tileid[tileid.index(':') +
                                        1:tileid.index(',')])
                # This should use the svn checkout!
                tilefile = glob(
                    os.path.join(rawdata_dir, str(night), '????????',
                                 'fiberassign-{:06d}.fits'.format(tileid)))
                #if len(tilefile) == 0:
                #    print('No fibermap file found for EXPID={}'.format(expid))
                #if len(tilefile) > 0:
                #    print('Multiple fibermap files found for EXPID={}!'.format(expid))
                if len(tilefile) > 0:
                    tsplit = tilefile[0].split('/')
                    fiberassignmap.add_row(
                        (str(night), str(expid), tileid,
                         os.path.join(tsplit[-2], tsplit[-1])))
                    #fiberassignmap[str(expid)] = [tileid]
                    data.append(qa)
                #else:
                #    print('  No tilefile found')
            #else:
            #    print('  No tilefile found')

        if len(data) == 0:
            print('No fiberassign files found for night {}'.format(night))
            return None, None
        data = astropy.table.vstack(data)

        # Need to update the data model to 'f4'.
        print('Updating the data model.')
        for col in data.colnames:
            if data[col].dtype == '>f8':
                data[col] = data[col].astype('f4')

        print('Writing {}'.format(stackwatchfile))
        fitsio.write(stackwatchfile,
                     data.as_array(),
                     clobber=True,
                     extname='PER_CAMFIBER')

        print('Writing {}'.format(fiberassignmapfile))
        # ValueError: unsupported type 'U42'
        #fitsio.write(fiberassignmapfile, fiberassignmap.as_array(), clobber=True)
        fiberassignmap.write(fiberassignmapfile, overwrite=True)

    return data, fiberassignmap
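For reference, a stripped-down version of the fitsio write/read pair used above (hypothetical file name and columns):

import fitsio
import numpy as np
from astropy.table import Table

qa = Table({'FIBER': np.arange(3, dtype='i4'), 'FLUX': np.ones(3, dtype='f4')})
fitsio.write('qa_sketch.fits', qa.as_array(), clobber=True, extname='PER_CAMFIBER')
print(fitsio.read('qa_sketch.fits', 'PER_CAMFIBER').dtype.names)   # ('FIBER', 'FLUX')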
import numpy as np
from astropy.io import fits
from astropy.table import Table

e_bins = 20
o_bins = 5
e_axis = np.logspace(-1,2,e_bins+1)
o_axis = np.linspace(0,3,o_bins+1)
rad_max = (np.ones([e_bins, o_bins]).T * np.linspace(0.1, 0.01, e_bins)).T

table = Table([[e_axis[:-1]],[e_axis[1:]],
               [o_axis[:-1]],[o_axis[1:]],
               [rad_max]],
              names=('ENERG_LO', 'ENERG_HI',
                     'THETA_LO', 'THETA_HI',
                     'RAD_MAX'))

data = table.as_array()
header = fits.Header()
header['HDUDOC'] = 'https://github.com/open-gamma-ray-astro/gamma-astro-data-formats', ''
header['HDUVERS'] = '0.2', ''
header['HDUCLASS'] = 'GADF', ''
header['HDUCLAS1'] = 'RESPONSE', ''
header['HDUCLAS2'] = 'RAD_MAX', ''
header['HDUCLAS3'] = 'POINT-LIKE', ''
header['HDUCLAS4'] = 'RAD_MAX_2D', ''


tbhdu = fits.BinTableHDU(data, header, name='RAD_MAX')

prihdu = fits.PrimaryHDU()

thdulist = fits.HDUList([prihdu, tbhdu])
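A quick in-memory check of the RAD_MAX_2D structure built above (continuing from the snippet; no file is written): the one-row vector columns keep their shapes in the HDU data.

rad = thdulist['RAD_MAX'].data
print(rad['ENERG_LO'].shape)   # (1, 20)
print(rad['RAD_MAX'].shape)    # (1, 20, 5)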
class TestUserSuppliedHaloCatalog(TestCase):
    """ Class providing tests of the `~halotools.sim_manager.UserSuppliedHaloCatalog`. 
    """

    def setUp(self):
        """ Pre-load various arrays into memory for use by all tests. 
        """
        self.Nhalos = 100
        self.Lbox = 100
        self.redshift = 0.0
        self.halo_x = np.linspace(0, self.Lbox, self.Nhalos)
        self.halo_y = np.linspace(0, self.Lbox, self.Nhalos)
        self.halo_z = np.linspace(0, self.Lbox, self.Nhalos)
        self.halo_mass = np.logspace(10, 15, self.Nhalos)
        self.halo_id = np.arange(0, self.Nhalos, dtype = int)
        self.good_halocat_args = (
            {'halo_x': self.halo_x, 'halo_y': self.halo_y, 
            'halo_z': self.halo_z, 'halo_id': self.halo_id, 'halo_mass': self.halo_mass}
            )
        self.toy_list = [elt for elt in self.halo_x]

        self.num_ptcl = 10000
        self.good_ptcl_table = Table(
            {'x': np.zeros(self.num_ptcl), 
            'y': np.zeros(self.num_ptcl), 
            'z': np.zeros(self.num_ptcl)}
            )


        self.dummy_cache_baseloc = helper_functions.dummy_cache_baseloc
        try:
            shutil.rmtree(self.dummy_cache_baseloc)
        except:
            pass
        os.makedirs(self.dummy_cache_baseloc)

    def test_particle_mass_requirement(self):

        with pytest.raises(HalotoolsError):
            halocat = UserSuppliedHaloCatalog(Lbox = 200, 
                **self.good_halocat_args)

    def test_lbox_requirement(self):

        with pytest.raises(HalotoolsError):
            halocat = UserSuppliedHaloCatalog(particle_mass = 200, 
                **self.good_halocat_args)

    def test_halos_contained_inside_lbox(self):

        with pytest.raises(HalotoolsError):
            halocat = UserSuppliedHaloCatalog(Lbox = 20, particle_mass = 100, 
                **self.good_halocat_args)

    def test_redshift_is_float(self):

        with pytest.raises(HalotoolsError) as err:
            halocat = UserSuppliedHaloCatalog(
                Lbox = 200, particle_mass = 100, redshift = '1.0', 
                **self.good_halocat_args)
        substr = "The ``redshift`` metadata must be a float."
        assert substr in err.value.message


    def test_successful_load(self):

        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift, 
            **self.good_halocat_args)
        assert hasattr(halocat, 'Lbox')
        assert halocat.Lbox == 200
        assert hasattr(halocat, 'particle_mass')
        assert halocat.particle_mass == 100

    def test_additional_metadata(self):

        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift,
            arnold_schwarzenegger = 'Stick around!', 
            **self.good_halocat_args)
        assert hasattr(halocat, 'arnold_schwarzenegger')
        assert halocat.arnold_schwarzenegger == 'Stick around!'

    def test_all_halo_columns_have_length_nhalos(self):

        # All halo catalog columns must have length-Nhalos
        bad_halocat_args = deepcopy(self.good_halocat_args)
        with pytest.raises(HalotoolsError):
            bad_halocat_args['halo_x'][0] = -1
            halocat = UserSuppliedHaloCatalog(Lbox = 200, 
                particle_mass = 100, redshift = self.redshift,
                **bad_halocat_args)

    def test_positions_contained_inside_lbox_alt_test(self):
        # positions must be < Lbox
        bad_halocat_args = deepcopy(self.good_halocat_args)
        with pytest.raises(HalotoolsError):
            bad_halocat_args['halo_x'][0] = 10000
            halocat = UserSuppliedHaloCatalog(Lbox = 200, 
                particle_mass = 100, redshift = self.redshift,
                **bad_halocat_args)

    def test_has_halo_x_column(self):
        # must have halo_x column 
        bad_halocat_args = deepcopy(self.good_halocat_args)
        with pytest.raises(HalotoolsError):
            del bad_halocat_args['halo_x']
            halocat = UserSuppliedHaloCatalog(Lbox = 200, 
                particle_mass = 100, redshift = self.redshift,
                **bad_halocat_args)

    def test_has_halo_id_column(self):
        # Must have halo_id column 
        bad_halocat_args = deepcopy(self.good_halocat_args)
        with pytest.raises(HalotoolsError):
            del bad_halocat_args['halo_id']
            halocat = UserSuppliedHaloCatalog(Lbox = 200, 
                particle_mass = 100, redshift = self.redshift,
                **bad_halocat_args)

    def test_has_halo_mass_column(self):
        # Must have some column storing a mass-like variable
        bad_halocat_args = deepcopy(self.good_halocat_args)
        with pytest.raises(HalotoolsError):
            del bad_halocat_args['halo_mass']
            halocat = UserSuppliedHaloCatalog(Lbox = 200, 
                particle_mass = 100, redshift = self.redshift,
                **bad_halocat_args)

    def test_halo_prefix_warning(self):
        # Must raise warning if a length-Nhalos array is passed with 
        # a keyword argument that does not begin with 'halo_'
        bad_halocat_args = deepcopy(self.good_halocat_args)
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered.
            warnings.simplefilter("always")
            bad_halocat_args['s'] = np.ones(self.Nhalos)
            halocat = UserSuppliedHaloCatalog(Lbox = 200, 
                particle_mass = 100, redshift = self.redshift,
                **bad_halocat_args)
            assert 'interpreted as metadata' in str(w[-1].message)

    def test_ptcl_table(self):
        """ Method performs various existence and consistency tests on the input ptcl_table.

        * Enforce that instances do *not* have ``ptcl_table`` attributes if none is passed. 

        * Enforce that instances *do* have ``ptcl_table`` attributes if a legitimate one is passed. 

        * Enforce that ptcl_table have ``x``, ``y`` and ``z`` columns. 

        * Enforce that ptcl_table input is an Astropy `~astropy.table.Table` object, not a Numpy recarray
        """

    def test_ptcl_table_dne(self):
        # Must not have a ptcl_table attribute when none is passed
        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift,
            **self.good_halocat_args)
        assert not hasattr(halocat, 'ptcl_table')

    def test_ptcl_table_exists_when_given_goodargs(self):
   
        # Must have ptcl_table attribute when argument is legitimate
        halocat = UserSuppliedHaloCatalog(
            Lbox = 200, particle_mass = 100, redshift = self.redshift,
            ptcl_table = self.good_ptcl_table, **self.good_halocat_args)
        assert hasattr(halocat, 'ptcl_table')

    def test_min_numptcl_requirement(self):
        # Must have at least 1e4 particles
        num_ptcl2 = 1000
        ptcl_table2 = Table(
            {'x': np.zeros(num_ptcl2), 
            'y': np.zeros(num_ptcl2), 
            'z': np.zeros(num_ptcl2)}
            )
        with pytest.raises(HalotoolsError):
            halocat = UserSuppliedHaloCatalog(
                Lbox = 200, particle_mass = 100, redshift = self.redshift,
                ptcl_table = ptcl_table2, **self.good_halocat_args)

    def test_ptcls_have_zposition(self):
        # Must have a 'z' column 
        num_ptcl2 = 10000
        ptcl_table2 = Table(
            {'x': np.zeros(num_ptcl2), 
            'y': np.zeros(num_ptcl2)}
            )
        with pytest.raises(HalotoolsError):
            halocat = UserSuppliedHaloCatalog(
                Lbox = 200, particle_mass = 100, redshift = self.redshift,
                ptcl_table = ptcl_table2, **self.good_halocat_args)

    def test_ptcls_are_astropy_table(self):
        # Data structure must be an astropy table, not an ndarray
        ptcl_table2 = self.good_ptcl_table.as_array()
        with pytest.raises(HalotoolsError):
            halocat = UserSuppliedHaloCatalog(
                Lbox = 200, particle_mass = 100, redshift = self.redshift,
                ptcl_table = ptcl_table2, **self.good_halocat_args)

    @pytest.mark.skipif('not HAS_H5PY')
    def test_add_halocat_to_cache1(self):
        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift, 
            **self.good_halocat_args)

        basename = 'abc'
        fname = os.path.join(self.dummy_cache_baseloc, basename)
        os.system('touch ' + fname)
        assert os.path.isfile(fname)

        dummy_string = '  '
        with pytest.raises(HalotoolsError) as err:
            halocat.add_halocat_to_cache(
                fname, dummy_string, dummy_string, dummy_string, dummy_string)
        substr = "Either choose a different fname or set ``overwrite`` to True"
        assert substr in err.value.message

        with pytest.raises(HalotoolsError) as err:
            halocat.add_halocat_to_cache(
                fname, dummy_string, dummy_string, dummy_string, dummy_string, 
                overwrite = True)
        assert substr not in err.value.message

    @pytest.mark.skipif('not HAS_H5PY')
    def test_add_halocat_to_cache2(self):
        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift, 
            **self.good_halocat_args)

        basename = 'abc'

        dummy_string = '  '
        with pytest.raises(HalotoolsError) as err:
            halocat.add_halocat_to_cache(
                basename, dummy_string, dummy_string, dummy_string, dummy_string)
        substr = "The directory you are trying to store the file does not exist."
        assert substr in err.value.message

    @pytest.mark.skipif('not HAS_H5PY')
    def test_add_halocat_to_cache3(self):
        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift, 
            **self.good_halocat_args)

        basename = 'abc'
        fname = os.path.join(self.dummy_cache_baseloc, basename)
        os.system('touch ' + fname)
        assert os.path.isfile(fname)

        dummy_string = '  '
        with pytest.raises(HalotoolsError) as err:
            halocat.add_halocat_to_cache(
                fname, dummy_string, dummy_string, dummy_string, dummy_string, 
                overwrite = True)
        substr = "The fname must end with an ``.hdf5`` extension."
        assert substr in err.value.message

    @pytest.mark.skipif('not HAS_H5PY')
    def test_add_halocat_to_cache4(self):
        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift, 
            **self.good_halocat_args)

        basename = 'abc.hdf5'
        fname = os.path.join(self.dummy_cache_baseloc, basename)
        os.system('touch ' + fname)
        assert os.path.isfile(fname)

        dummy_string = '  '
        class Dummy(object):
            pass
            
            def __str__(self):
                raise TypeError
        not_representable_as_string = Dummy()

        with pytest.raises(HalotoolsError) as err:
            halocat.add_halocat_to_cache(
                fname, not_representable_as_string, dummy_string, dummy_string, dummy_string, 
                overwrite = True)
        substr = "must all be strings."
        assert substr in err.value.message

    @pytest.mark.skipif('not HAS_H5PY')
    def test_add_halocat_to_cache5(self):
        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift, 
            **self.good_halocat_args)

        basename = 'abc.hdf5'
        fname = os.path.join(self.dummy_cache_baseloc, basename)
        os.system('touch ' + fname)
        assert os.path.isfile(fname)

        dummy_string = '  '
        class Dummy(object):
            pass
            
            def __str__(self):
                raise TypeError
        not_representable_as_string = Dummy()

        with pytest.raises(HalotoolsError) as err:
            halocat.add_halocat_to_cache(
                fname, dummy_string, dummy_string, dummy_string, dummy_string, 
                overwrite = True, some_more_metadata = not_representable_as_string)
        substr = "keyword is not representable as a string."
        assert substr in err.value.message


    @pytest.mark.skipif('not HAS_H5PY')
    def test_add_halocat_to_cache6(self):
        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift, 
            **self.good_halocat_args)

        basename = 'abc.hdf5'
        fname = os.path.join(self.dummy_cache_baseloc, basename)

        simname = 'dummy_simname'
        halo_finder = 'dummy_halo_finder'
        version_name = 'dummy_version_name'
        processing_notes = 'dummy processing notes'

        halocat.add_halocat_to_cache(
            fname, simname, halo_finder, version_name, processing_notes, 
            overwrite = True, some_additional_metadata = processing_notes)

        cache = HaloTableCache()
        assert halocat.log_entry in cache.log

        cache.remove_entry_from_cache_log(
            halocat.log_entry.simname, 
            halocat.log_entry.halo_finder,
            halocat.log_entry.version_name,
            halocat.log_entry.redshift,
            halocat.log_entry.fname, 
            raise_non_existence_exception = True, 
            update_ascii = True,
            delete_corresponding_halo_catalog = True)

    @pytest.mark.skipif('not HAS_H5PY')
    @pytest.mark.xfail
    def test_add_ptcl_table_to_cache(self):
        halocat = UserSuppliedHaloCatalog(Lbox = 200, 
            particle_mass = 100, redshift = self.redshift, 
            **self.good_halocat_args)
        halocat.add_ptcl_table_to_cache()


    def tearDown(self):
        try:
            shutil.rmtree(self.dummy_cache_baseloc)
        except:
            pass
        'length': length,
        'phi': phi,
        'psi': psi,
        'r': r,
        'cen_x': cen_x,
        'cen_y': cen_y,
        'size': size,
        'mcAlttel': mcAlttel,
        'mcAztel': mcAztel
    }
    ntuple = Table(output)

    # If the destination FITS file doesn't exist, create a new one with proper headers
    if not os.path.isfile(outfile):
        # Convert the Table of data into a BinTableHDU to write it into the FITS file
        pardata = ntuple.as_array()
        parheader = fits.Header()
        parheader.update(ntuple.meta)

        if storeimg == True:
            pixels = fits.ImageHDU(fitsdata)  #Image with pixel content

        #Write the data in an HDUList for storing in a fitsfile
        hdr = fits.Header()  # Example header; we can add more things to this header
        hdr['TEL'] = 'LST1'
        primary_hdu = fits.PrimaryHDU(header=hdr)
        hdul = fits.HDUList([primary_hdu])
        hdul.append(fits.BinTableHDU(data=pardata, header=parheader))
        if storeimg == True:
            hdul.append(pixels)
Example #23
    def test_write_bintable(self):
        """Test writing binary tables to FITS.
        """
        from ..io.util import write_bintable, fitsheader
        #
        # Input: Table
        #
        hdr = fitsheader(dict(A=1, B=2))
        hdr['C'] = ('BLAT', 'FOO')
        data = Table()
        data['X'] = [1, 2, 3]
        data['Y'] = [3, 4, 5]
        write_bintable(self.testfile, data, header=hdr)
        #
        # Standard suite of table tests.
        #
        result, newhdr = fits.getdata(self.testfile, header=True)
        self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))
        for colname in data.dtype.names:
            self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))
        self.assertEqual(newhdr.comments['C'], 'FOO')
        for key in hdr.keys():
            self.assertIn(key, newhdr)
        self.assertIn('DATASUM', newhdr)
        self.assertIn('CHECKSUM', newhdr)
        os.remove(self.testfile)
        #
        # Input: ndarray
        #
        hdr = dict(A=1, B=2)
        data = data.as_array()
        write_bintable(self.testfile, data, header=hdr)
        #
        # Standard suite of table tests.
        #
        result, newhdr = fits.getdata(self.testfile, header=True)
        self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))
        for colname in data.dtype.names:
            self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))
        # self.assertEqual(newhdr.comments['C'], 'FOO')
        for key in hdr.keys():
            self.assertIn(key, newhdr)
        self.assertIn('DATASUM', newhdr)
        self.assertIn('CHECKSUM', newhdr)
        os.remove(self.testfile)
        #
        # Input: dictionary
        #
        hdr = dict(A=1, B=2)
        d = dict(X=np.array([1, 2, 3]), Y=np.array([3, 4, 5]))
        write_bintable(self.testfile, d, header=hdr)
        #
        # Standard suite of table tests.
        #
        result, newhdr = fits.getdata(self.testfile, header=True)

        self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))

        for colname in data.dtype.names:
            self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))
        # self.assertEqual(newhdr.comments['C'], 'FOO')
        for key in hdr.keys():
            self.assertIn(key, newhdr)
        self.assertIn('DATASUM', newhdr)
        self.assertIn('CHECKSUM', newhdr)
        os.remove(self.testfile)
        #
        # Input: Table with column comments.
        #
        hdr = fitsheader(dict(A=1, B=2))
        hdr['C'] = ('BLAT', 'FOO')
        data = Table()
        data['X'] = [1, 2, 3]
        data['Y'] = [3, 4, 5]
        write_bintable(self.testfile, data, header=hdr,
                       comments={'X': 'This is X', 'Y': 'This is Y'},
                       units={'X': 'mm', 'Y': 'mm'})
        #
        # Standard suite of table tests.
        #
        result, newhdr = fits.getdata(self.testfile, header=True)
        self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))
        for colname in data.dtype.names:
            self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))
        # self.assertEqual(newhdr.comments['C'], 'FOO')
        for key in hdr.keys():
            self.assertIn(key, newhdr)
        self.assertIn('DATASUM', newhdr)
        self.assertIn('CHECKSUM', newhdr)
        self.assertEqual(newhdr['TTYPE1'], 'X')
        self.assertEqual(newhdr.comments['TTYPE1'], 'This is X')
        self.assertEqual(newhdr['TTYPE2'], 'Y')
        self.assertEqual(newhdr.comments['TTYPE2'], 'This is Y')
        self.assertEqual(newhdr['TUNIT1'], 'mm')
        self.assertEqual(newhdr.comments['TUNIT1'], 'X units')
        self.assertEqual(newhdr['TUNIT2'], 'mm')
        self.assertEqual(newhdr.comments['TUNIT2'], 'Y units')
        #
        # Input: Table with no EXTNAME, existing file
        #
        write_bintable(self.testfile, data, header=hdr)
        #
        # Input: Table with EXTNAME, existing file
        #
        write_bintable(self.testfile, data, header=hdr, extname='FOOBAR')
        #
        # Standard suite of table tests.
        #
        result, newhdr = fits.getdata(self.testfile, header=True, extname='FOOBAR')
        self.assertEqual(sorted(result.dtype.names), sorted(data.dtype.names))
        for colname in data.dtype.names:
            self.assertTrue(np.all(result[colname] == data[colname]), '{} data mismatch'.format(colname))
        # self.assertEqual(newhdr.comments['C'], 'FOO')
        for key in hdr.keys():
            self.assertIn(key, newhdr)
        self.assertIn('DATASUM', newhdr)
        self.assertIn('CHECKSUM', newhdr)
        #
        # Input: Table with existing EXTNAME, existing file
        #
        write_bintable(self.testfile, data, header=hdr, extname='FOOBAR')
        #
        # Input: Table with EXTNAME, existing file, overwrite
        #
        write_bintable(self.testfile, data, header=hdr, extname='FOOBAR', clobber=True)
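
For reference, fits.getdata(..., header=True), as used throughout this test, returns the table data together with its header, and extname selects a named extension. A tiny sketch with a hypothetical file name:

from astropy.io import fits

# 'bintable.fits' is a hypothetical file written with extname='FOOBAR'
data, header = fits.getdata('bintable.fits', header=True, extname='FOOBAR')
print(data.dtype.names)          # column names of the binary table
print(header['EXTNAME'])         # 'FOOBAR'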
def importDATA(password):
    #import input data from MIGO table
    Special_id1, Obs_date, Vertex1, Vertex2, Vertex3, Vertex4, Status, Updated_Date = Open_InputDatabase.getinputs(
        password)
    #loop over each special ID
    for x in range(0, 2):  #len(Special_id1)):
        #import url and make a table
        url, z, TYPE = url_for_ISPY_SkyCoord.ISPY_ephemeris_inSkyCoord(
            'JWST', Obs_date[x], Special_id1[x], Vertex1[x], Vertex2[x],
            Vertex3[x], Vertex4[x])
        #get table name and create a table for this Specific ID
        table_name = Setup_table_inMYSQL_tofillin_data.makeMySQLtable(
            Special_id1[x], password)
        Special_id = table_name
        #check if there are any data on the website
        if z == None:
            k = 'No results'
        else:
            #get the table and the tablelines
            distant, tablelines = Create_ISPYtable.make_table(
                z, 'SPK-ID', 'EXPLANATION', 1, 1)
            #make the table into lines to put in the database
            f = Table.as_array(distant)

            #make a string of JPL_SPKID values
            JPL_SPKID = []
            for x in range(0, len(f)):
                JPL_SPKID.append(f[x][0])

            # Open database connection
            db = pymysql.connect("localhost", "root", password)

            # prepare a cursor object using cursor() method and go to the right database
            cursor = db.cursor()
            cursor.execute("USE ISPY")

            #table columns
            tab_col = [
                'JPL_SPKID', 'IAU_Number', 'Name_designation', 'RA', 'DEC1',
                'Amag', 'dRAcosD', 'dDEC_by_dt', 'CntDst', 'PsAng', 'Data_Arc',
                'Nobs', 'Error', 'Ellipse', 'Theta', 'Last_updated',
                table_name, 'Special_id', Special_id
            ]

            #check if there are any data in the table already
            sql3 = "SELECT {0} from {1}".format(tab_col[0], tab_col[16])
            try:
                # Execute the SQL command
                cursor.execute(sql3)
                #Fetch all the rows in a list of lists. These are the data which are already in the database
                result = cursor.fetchall()
            except:
                print("Error: unable to fetch data")

            #select the data from the table which are older than 1 month
            sql4 = "SELECT {0} from {1} where {2}<NOW()-INTERVAL 1 MONTH".format(
                tab_col[0], tab_col[16], tab_col[15])
            try:
                # Execute the SQL command
                cursor.execute(sql4)
                # Fetch all the rows in a list of lists. these are the data which are older than 1 month
                result1 = cursor.fetchall()
                # Now print fetched result
            except:
                print('No data to return')

            #loop over all the rows in the table from ISPY
            for x in range(0, len(f)):
                #check if the asteroid already in the database
                if JPL_SPKID[x] in result:
                    #print ('Already in the database')

                    #check how old are the data of the asteroid in the database (if old update them)
                    if JPL_SPKID[x] in result1:

                        sql1 = "UPDATE {0} SET RA='{1[3]}',DEC1='{1[4]}',Special_id='{2}' where JPL_SPKID={1[0]}".format(
                            tab_col[16], f[x], tab_col[18])

                        try:
                            # Execute the SQL command
                            cursor.execute(sql1)
                            # Commit your changes in the database
                            db.commit()
                        except:
                            # Rollback in case there is any error
                            db.rollback()
                        #update Status in the input table to know if that line has been run already
                        sql11 = "UPDATE INPUT_Table SET Status='UPDATED' where Special_id='{0}'".format(
                            tab_col[16])
                        #print (sql11)
                        try:
                            # Execute the SQL command
                            cursor.execute(sql11)
                            # Commit your changes in the database
                            db.commit()
                        except:
                            # Rollback in case there is any error
                            db.rollback()

                        #loop over all the columns, take care for the empty data columns/rows
                        for y in range(0, len(tab_col) - 4):
                            if y == 0 or y == 2 or y == 3 or y == 4:
                                sql1 = 0
                            elif ma.getmask(f[x][y]) == True:
                                # Prepare SQL query to INSERT a record into the database.
                                sql1 = 0
                                #if data is missing, nothing is imported into the database (the value becomes None)
                            else:
                                #fill in(update) the data
                                sql1 = "UPDATE [0] SET {1}={2}\
                                        where {3}={4}".format(
                                    tab_col[16], tab_col[y], f[x][y],
                                    tab_col[0], f[x][0])

                                try:
                                    # Execute the SQL command
                                    cursor.execute(sql1)
                                    # Commit your changes in the database
                                    db.commit()
                                except:
                                    # Rollback in case there is any error
                                    db.rollback()

                                #update Status in the input table to know if that line has been run already
                                sql11 = "UPDATE INPUT_Table SET Status='UPDATED' where Special_id='{0}'".format(
                                    tab_col[16])
                                #print (sql11)
                                try:
                                    # Execute the SQL command
                                    cursor.execute(sql11)
                                    # Commit your changes in the database
                                    db.commit()
                                except:
                                    # Rollback in case there is any error
                                    db.rollback()

                #if the data is not in the database, input them
                else:

                    sql1 = "INSERT INTO {0}(Special_id,JPL_SPKID,Name_designation,\
                                      RA ,\
                                      DEC1 ,Last_Updated)\
                           VALUES ('{2}',{1[0]},'{1[2]}','{1[3]}','{1[4]}',NOW())".format(
                        tab_col[16], f[x], tab_col[18])

                    try:
                        # Execute the SQL command
                        cursor.execute(sql1)
                        # Commit your changes in the database
                        db.commit()
                    except:
                        # Rollback in case there is any error
                        db.rollback()
                    #loop over all the columns
                    for y in range(0, len(tab_col) - 4):
                        if y == 0 or y == 2 or y == 3 or y == 4:
                            sql1 = 0
                        elif ma.getmask(f[x][y]) == True:
                            # Prepare SQL query to INSERT a record into the database.
                            sql1 = 0
                        else:
                            sql1 = "UPDATE {0} SET {1}={2}\
                            where {3}={4}".format(tab_col[16], tab_col[y],
                                                  f[x][y], tab_col[0], f[x][0])
                            #print(x, y, f[x][y])
                            try:
                                # Execute the SQL command
                                cursor.execute(sql1)
                                # Commit your changes in the database
                                db.commit()
                            except:
                                # Rollback in case there is any error
                                db.rollback()

                    #update Status in the input table to know if that line has been run already
                    sql11 = "UPDATE INPUT_Table SET Status='UPDATED' where Special_id='{0}'".format(
                        tab_col[16])
                    #print (sql11)
                    try:
                        # Execute the SQL command
                        cursor.execute(sql11)
                        # Commit your changes in the database
                        db.commit()
                    except:
                        # Rollback in case there is any error
                        db.rollback()

            #check if the number of rows in f and in MySQL is the same (so if all the data has been imported)
            sql2 = "SELECT COUNT(*) FROM {0}".format(tab_col[16])

            try:
                # Execute the SQL command
                cursor.execute(sql2)
                # Fetch all the rows in a list of lists.
                result2 = cursor.fetchone()
                # Now check fetched result
                if result2[0] == len(f):
                    print(
                        'For Special ID ', Special_id,
                        ': All the data have been recorded into the database.')
                else:
                    #print ('For Special ID ',Special_id,': The number of rows in the database is not the same as from ISPY')
                    #check why the sizes of tables do not match
                    sql5 = "SELECT {0} from {1}".format(
                        tab_col[0], tab_col[16])
                    try:
                        # Execute the SQL command
                        cursor.execute(sql5)
                        #Fetch all the rows in a list of lists.
                        result5 = cursor.fetchall()
                        for x in range(0, len(result5)):
                            #check if the asteroid already in the database
                            if result5[x] not in JPL_SPKID:
                                # Prepare SQL query to DELETE required records
                                sql6 = "DELETE FROM {0} WHERE {1}={2}".format(
                                    tab_col[16], tab_col[0], result5[x][0])
                                #print (sql6)
                                try:
                                    # Execute the SQL command
                                    cursor.execute(sql6)
                                    # Commit your changes in the database
                                    db.commit()
                                except:
                                    # Rollback in case there is any error
                                    db.rollback()
                                    print(
                                        'Not possible to delete the outdated data'
                                    )
                    except:
                        print(
                            "Error: unable to fetch data to find the reason for not matching tables."
                        )
                    try:
                        # Execute the SQL command
                        cursor.execute(sql2)
                        # Fetch all the rows in a list of lists.
                        result2 = cursor.fetchone()
                        # Now check fetched result
                        if result2[0] == len(f):
                            print(
                                'For Special ID ', Special_id,
                                ': All the data have been recorded into the database.'
                            )
                    except:
                        print('For Special ID ', Special_id,
                              ": Error: unable to fetch data")

            except:
                print('For Special ID ', Special_id,
                      ": Error: unable to fetch data")
            # disconnect from server
            db.close()
    return Special_id1
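
The SQL in this function is assembled with Python string formatting. An alternative sketch (hypothetical Targets table, not the script's actual schema) passes the values taken from the Table.as_array() rows as query parameters, so pymysql handles the quoting:

import pymysql
from astropy.table import Table

distant = Table({'JPL_SPKID': [2000001, 2000002],
                 'RA': ['10 00 00.0', '11 30 00.0'],
                 'DEC1': ['+05 00 00', '-02 30 00']})
f = distant.as_array()

db = pymysql.connect(host="localhost", user="root", password="secret", database="ISPY")
cursor = db.cursor()
sql = "INSERT INTO Targets (JPL_SPKID, RA, DEC1) VALUES (%s, %s, %s)"
for row in f:
    cursor.execute(sql, (int(row['JPL_SPKID']), str(row['RA']), str(row['DEC1'])))
db.commit()
db.close()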
data = []
obs_sp = []
synth_sp = []
smooth_sp = []
# `spectra`, `colnames` and `hdu` are defined earlier in the original script.
lambdas = spectra[0][14]
colnames_out = colnames[:12] + colnames[15:]

width = 50./ (lambdas[1] - lambdas[0])

for row in spectra:
    # Some spectra are just invalid...
    if row[12][0] > 0.2:
        data.append(row[:12] + row[15:])
        obs_sp.append(row[12])
        smooth_sp.append(row[12] / gaussian_filter1d(row[12], width))
        synth_sp.append(row[13])

data_table = Table(rows=data, names=colnames_out, dtype=[type(x) for x in data[0]])
header = fits.Header()
header.update(data_table.meta)
main = fits.BinTableHDU(data_table.as_array(), header)
lambda_column = fits.Column('lambda', 'E', array=lambdas)
lambdas = fits.BinTableHDU.from_columns([lambda_column])
header = fits.Header()
header['CONTENT'] = 'Observed spectra'
obs_sp = fits.ImageHDU(obs_sp, header, do_not_scale_image_data=True)
header['CONTENT'] = 'Smoothed spectra'
smooth_sp = fits.ImageHDU(smooth_sp, header, do_not_scale_image_data=True)
header['CONTENT'] = 'Synthetic spectra'
synth_sp = fits.ImageHDU(synth_sp, header, do_not_scale_image_data=True)
hdulist = fits.HDUList([hdu, main, obs_sp, smooth_sp, synth_sp, lambdas])
hdulist.writeto('RAVE_ready.fits')
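
The smooth_sp step above divides each spectrum by a Gaussian-smoothed copy of itself, i.e. a rough continuum normalization with a kernel of about 50 wavelength units expressed in pixels. A small sketch of that idea on synthetic data, using scipy's gaussian_filter1d as the script does:

import numpy as np
from scipy.ndimage import gaussian_filter1d

lambdas = np.linspace(4000.0, 5000.0, 1000)                    # wavelength grid
flux = 1.0 + 0.001 * (lambdas - 4500.0) / 500.0                # slowly varying continuum
flux -= 0.4 * np.exp(-0.5 * ((lambdas - 4861.0) / 1.0) ** 2)   # one absorption line

width = 50.0 / (lambdas[1] - lambdas[0])      # 50 wavelength units converted to pixels
continuum = gaussian_filter1d(flux, width)    # heavy smoothing approximates the continuum
normalized = flux / continuum                 # the narrow line survives, the trend is removed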
Beispiel #26
0
    def aper_summary(self, gal_only=False, output=False):
        """Get all the stellar mass, age, and metallicity profiles.

        Parameters
        ----------
        gal_only: bool, optional
            Only provide summary of the whole galaxy. Default: False.
        output : bool, optional
            Return the aperture summary table when True. Default: False

        Configuration Parameters
        ------------------------
        Can be found in `self.config`:
        subpix : int, optional
            Subpixel sampling factor. Default is 5.

        """
        # Aperture mass profiles
        self.maper('gal')
        if not gal_only:
            self.maper('ins')
            self.maper('exs')

        # Aperture age profiles
        self.aprof('age', 'gal', return_mass=True)
        if not gal_only:
            self.aprof('age', 'ins', return_mass=True)
            self.aprof('age', 'exs', return_mass=True)

        # Aperture metallicity profiles
        self.aprof('met', 'gal')
        if not gal_only:
            self.aprof('met', 'ins')
            self.aprof('met', 'exs')

        # Gather these results into an Astropy Table
        aper_sum = Table()
        aper_sum.add_column(Column(data=self.rad_inn, name='rad_inn'))
        aper_sum.add_column(Column(data=self.rad_out, name='rad_out'))
        aper_sum.add_column(Column(data=self.rad_mid, name='rad_mid'))

        aper_sum.add_column(Column(data=self.maper_gal, name='maper_gal'))
        if not gal_only:
            aper_sum.add_column(Column(data=self.maper_ins, name='maper_ins'))
            aper_sum.add_column(Column(data=self.maper_exs, name='maper_exs'))

        aper_sum.add_column(
            Column(data=self.age_prof_gal['prof_w'], name='age_gal_w'))
        aper_sum.add_column(
            Column(data=self.age_prof_gal['prof'], name='age_gal'))
        aper_sum.add_column(
            Column(data=self.age_prof_gal['flag'], name='age_gal_flag'))
        aper_sum.add_column(
            Column(data=self.age_prof_gal['mass'], name='mprof_gal'))
        if not gal_only:
            aper_sum.add_column(
                Column(data=self.age_prof_ins['prof_w'], name='age_ins_w'))
            aper_sum.add_column(
                Column(data=self.age_prof_ins['prof'], name='age_ins'))
            aper_sum.add_column(
                Column(data=self.age_prof_ins['flag'], name='age_ins_flag'))
            aper_sum.add_column(
                Column(data=self.age_prof_ins['mass'], name='mprof_ins'))
            aper_sum.add_column(
                Column(data=self.age_prof_exs['prof_w'], name='age_exs_w'))
            aper_sum.add_column(
                Column(data=self.age_prof_exs['prof'], name='age_exs'))
            aper_sum.add_column(
                Column(data=self.age_prof_exs['flag'], name='age_exs_flag'))
            aper_sum.add_column(
                Column(data=self.age_prof_exs['mass'], name='mprof_exs'))
        aper_sum.add_column(
            Column(data=self.met_prof_gal['prof_w'], name='met_gal_w'))
        aper_sum.add_column(
            Column(data=self.met_prof_gal['prof'], name='met_gal'))
        aper_sum.add_column(
            Column(data=self.met_prof_gal['flag'], name='met_gal_flag'))

        if not gal_only:
            aper_sum.add_column(
                Column(data=self.met_prof_ins['prof_w'], name='met_ins_w'))
            aper_sum.add_column(
                Column(data=self.met_prof_ins['prof'], name='met_ins'))
            aper_sum.add_column(
                Column(data=self.met_prof_ins['flag'], name='met_ins_flag'))
            aper_sum.add_column(
                Column(data=self.met_prof_exs['prof_w'], name='met_exs_w'))
            aper_sum.add_column(
                Column(data=self.met_prof_exs['prof'], name='met_exs'))
            aper_sum.add_column(
                Column(data=self.met_prof_exs['flag'], name='met_exs_flag'))

        setattr(self, 'aper_sum', aper_sum.as_array())

        if output:
            return aper_sum
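
Since the summary is stored via setattr(self, 'aper_sum', aper_sum.as_array()), downstream code sees a plain numpy structured array rather than a Table. A tiny sketch (synthetic values, column names taken from the snippet above) of what that conversion yields:

import numpy as np
from astropy.table import Table, Column

aper_sum = Table()
aper_sum.add_column(Column(data=np.array([0.5, 1.0, 2.0]), name='rad_mid'))
aper_sum.add_column(Column(data=np.array([1e9, 3e9, 5e9]), name='maper_gal'))

arr = aper_sum.as_array()      # numpy structured array
print(arr.dtype.names)         # ('rad_mid', 'maper_gal')
print(arr['maper_gal'][-1])    # fields are accessed by name, much like Table columns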
Beispiel #27
0
        'phi': phi,
        'psi': psi,
        'r': r,
        'x': x,
        'y': y,
        'intensity': intensity,
        'mcAlttel': mcAlttel,
        'mcAztel': mcAztel
    }
    ntuple = Table(output)

    #If destination fitsfile doesn't exist, will create a new one with proper headers
    if os.path.isfile(outfile) == False:
        if filetype == 'fits':
            #Convert Tables of data into HDUBinTables to write them into fits files
            pardata = ntuple.as_array()
            parheader = fits.Header()
            parheader.update(ntuple.meta)

            if storeimg == True:
                pixels = fits.ImageHDU(fitsdata)  #Image with pixel content

            #Write the data in an HDUList for storing in a fitsfile
            hdr = fits.Header(
            )  #Example header, we can add more things to this header
            hdr['TEL'] = 'LST1'
            primary_hdu = fits.PrimaryHDU(header=hdr)
            hdul = fits.HDUList([primary_hdu])
            hdul.append(fits.BinTableHDU(data=pardata, header=parheader))
            if storeimg == True:
                hdul.append(pixels)
Beispiel #28
0
def precompute_synthetic_grid(output_dirname,
                              ranges,
                              wavelengths,
                              to_resolution,
                              modeled_layers_pack,
                              atomic_linelist,
                              isotopes,
                              solar_abundances,
                              segments=None,
                              number_of_processes=1,
                              code="spectrum",
                              use_molecules=False,
                              steps=False,
                              tmp_dir=None):
    """
    Pre-compute a synthetic grid with some reference ranges (Teff, log(g) and
    MH combinations) and all the steps that iSpec will perform in the
    astrophysical parameter determination process.

    All the non-convolved spectra will be saved in a subdir and a complete
    grid file with the reference points already convolved will be saved in a
    FITS file for fast comparison.

    The output directory can be used by the routines 'model_spectrum' and
    'estimate_initial_ap'.
    """
    code = code.lower()
    if code not in ['spectrum', 'turbospectrum', 'moog', 'synthe', 'sme']:
        raise Exception("Unknown radiative transfer code: %s" % (code))

    reference_list_filename = output_dirname + "/parameters.tsv"
    if to_resolution is not None:
        reference_grid_filename = output_dirname + "/convolved_grid_%i.fits.gz" % to_resolution
    fits_dir = os.path.join(output_dirname, "grid/")
    mkdir_p(fits_dir)
    if steps:
        steps_fits_dir = os.path.join(output_dirname, "steps/")
        mkdir_p(steps_fits_dir)

    import dill  # To allow pickle of lambda functions (e.g., one element in modeled_layers_pack)
    import pickle
    pickled_modeled_layers_pack = pickle.dumps(modeled_layers_pack)

    # For code != "grid", ranges are always in position 7 (for grid it would be in position 8)
    valid_ranges = modeled_layers_pack[7]
    teff_range = valid_ranges['teff']
    logg_range = valid_ranges['logg']
    MH_range = valid_ranges['MH']
    alpha_range = valid_ranges.get(
        'alpha', (-1.5, 1.5)
    )  # Fallback range if 'alpha' is not a free parameter for atmosphere interpolation
    vmic_range = valid_ranges.get(
        'vmic', (0.0, 50.)
    )  # Fallback range if 'vmic' is not a free parameter for atmosphere interpolation

    # Parallelization pool
    if number_of_processes == 1:
        pool = None
    else:
        pool = Pool(number_of_processes)

    # Create grid binary file
    elapsed = 0  # seconds

    num_ref_spec = len(ranges)
    num_spec = num_ref_spec * 9  # Reference + 8 variations in Teff, logg, MH, alpha, vmic, vmac, vsini, limb darkening coeff

    i = 0
    for teff, logg, MH, alpha, vmic in ranges:
        if vmic is None:
            vmic = estimate_vmic(teff, logg, MH)
        vmac = 0.0  # This can be modified after synthesis if needed
        vsini = 0.0  # This can be modified after synthesis if needed
        limb_darkening_coeff = 0.00  # This can be modified after synthesis if needed
        resolution = 0  # This can be modified after synthesis if needed
        is_step = False
        if not valid_atmosphere_target(modeled_layers_pack, {
                'teff': teff,
                'logg': logg,
                'MH': MH,
                'alpha': alpha
        }):
            raise Exception(
                "Target parameters out of the valid ranges: teff={} logg={} MH={} alpha={}"
                .format(teff, logg, MH, alpha))
        points = [
            (teff, logg, MH, alpha, vmic, vmac, vsini, limb_darkening_coeff,
             is_step),
        ]
        if steps:
            is_step = True
            new_teff = teff + Constants.SYNTH_STEP_TEFF if teff + Constants.SYNTH_STEP_TEFF <= teff_range[
                -1] else teff - Constants.SYNTH_STEP_TEFF
            new_logg = logg + Constants.SYNTH_STEP_LOGG if logg + Constants.SYNTH_STEP_LOGG <= logg_range[
                -1] else logg - Constants.SYNTH_STEP_LOGG
            new_MH = MH + Constants.SYNTH_STEP_MH if MH + Constants.SYNTH_STEP_MH <= MH_range[
                -1] else MH - Constants.SYNTH_STEP_MH
            new_alpha = alpha + Constants.SYNTH_STEP_ALPHA if alpha + Constants.SYNTH_STEP_ALPHA <= alpha_range[
                -1] else alpha - Constants.SYNTH_STEP_ALPHA
            new_vmic = vmic + Constants.SYNTH_STEP_VMIC if vmic + Constants.SYNTH_STEP_VMIC <= vmic_range[
                -1] else vmic - Constants.SYNTH_STEP_VMIC
            # For each reference point, calculate also the variations that iSpec will perform in the first iteration
            points += [  # Final unconvolved spectra where vmic/vmac are free and do not follow vmic/vmac empirical relations
                (new_teff, logg, MH, alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, new_logg, MH, alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, logg, new_MH, alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, logg, MH, new_alpha, vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
                (teff, logg, MH, alpha, new_vmic, vmac, vsini,
                 limb_darkening_coeff, is_step),
            ]
            points += [
                # Final unconvolved spectra where vmic is not free and does follow vmic empirical relations
                (new_teff, logg, MH, alpha, estimate_vmic(new_teff, logg, MH),
                 vmac, vsini, limb_darkening_coeff, is_step),
                (teff, new_logg, MH, alpha, estimate_vmic(teff, new_logg, MH),
                 vmac, vsini, limb_darkening_coeff, is_step),
                (teff, logg, new_MH, alpha, estimate_vmic(teff, logg, new_MH),
                 vmac, vsini, limb_darkening_coeff, is_step),
            ]

        for j, (teff, logg, MH, alpha, vmic, vmac, vsini, limb_darkening_coeff,
                is_step) in enumerate(points):
            if is_step:
                filename_out = steps_fits_dir + "{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
                    int(teff), logg, MH, alpha, vmic, vmac, vsini,
                    limb_darkening_coeff) + ".fits.gz"
            else:
                filename_out = fits_dir + "{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
                    int(teff), logg, MH, alpha, vmic, vmac, vsini,
                    limb_darkening_coeff) + ".fits.gz"

            if os.path.exists(filename_out):
                print("Skipping", teff, logg, MH, alpha, vmic, vmac, vsini,
                      limb_darkening_coeff, "already computed")
                continue

            if pool is None:
                if sys.platform == "win32":
                    # On Windows, the best timer is time.clock()
                    default_timer = time.clock
                else:
                    # On most other platforms the best timer is time.time()
                    default_timer = time.time

                lock = FileLock(filename_out + ".lock")
                try:
                    lock.acquire(timeout=-1)  # Don't wait
                except (LockTimeout, AlreadyLocked) as e:
                    # Some other process is computing this spectrum, do not continue
                    print("Skipping", teff, logg, MH, alpha, vmic, vmac, vsini,
                          limb_darkening_coeff, "already locked")
                    continue

                try:
                    tcheck = default_timer()
                    # Validate parameters
                    __generate_synthetic_fits(filename_out,
                                              wavelengths,
                                              segments,
                                              teff,
                                              logg,
                                              MH,
                                              alpha,
                                              vmic,
                                              vmac,
                                              vsini,
                                              limb_darkening_coeff,
                                              resolution,
                                              pickled_modeled_layers_pack,
                                              atomic_linelist,
                                              isotopes,
                                              solar_abundances,
                                              code=code,
                                              use_molecules=use_molecules,
                                              tmp_dir=tmp_dir,
                                              locked=True)
                    elapsed = default_timer() - tcheck

                    print(
                        "-----------------------------------------------------"
                    )
                    print("Remaining time:")
                    print("\t", (num_spec - i) * elapsed, "seconds")
                    print("\t", (num_spec - i) * (elapsed / 60), "minutes")
                    print("\t", (num_spec - i) * (elapsed / (60 * 60)),
                          "hours")
                    print("\t", (num_spec - i) * (elapsed / (60 * 60 * 24)),
                          "days")
                    print(
                        "-----------------------------------------------------"
                    )
                finally:
                    lock.release()

            else:
                pool.apply_async(__generate_synthetic_fits, [
                    filename_out, wavelengths, segments, teff, logg, MH, alpha,
                    vmic, vmac, vsini, limb_darkening_coeff, resolution,
                    pickled_modeled_layers_pack, atomic_linelist, isotopes,
                    solar_abundances
                ],
                                 kwds={
                                     'code': code,
                                     'use_molecules': use_molecules,
                                     'tmp_dir': tmp_dir,
                                     'locked': False
                                 })
            i += 1

    if pool is not None:
        pool.close()
        pool.join()

    # Create parameters.tsv
    reference_list = Table()
    if len(np.unique(ranges[['logg', 'MH', 'alpha', 'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_teff', dtype=int))
    else:
        reference_list.add_column(Column(name='teff', dtype=int))
    if len(np.unique(ranges[['teff', 'MH', 'alpha', 'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_logg', dtype=float))
    else:
        reference_list.add_column(Column(name='logg', dtype=float))
    if len(np.unique(ranges[['teff', 'logg', 'alpha',
                             'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_MH', dtype=float))
    else:
        reference_list.add_column(Column(name='MH', dtype=float))
    if len(np.unique(ranges[['teff', 'logg', 'MH', 'vmic']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_alpha', dtype=float))
    else:
        reference_list.add_column(Column(name='alpha', dtype=float))
    if len(np.unique(ranges[['teff', 'logg', 'MH', 'alpha']])) == len(ranges):
        reference_list.add_column(Column(name='fixed_vmic', dtype=float))
    else:
        reference_list.add_column(Column(name='vmic', dtype=float))
    reference_list.add_column(Column(name='filename', dtype='|S100'))
    for teff, logg, MH, alpha, vmic in ranges:
        # Only use the first spectra generated for each combination
        zero_vmac = 0.0
        zero_vsini = 0.0
        zero_limb_darkening_coeff = 0.00
        reference_filename_out = "./grid/{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
            int(teff), logg, MH, alpha, vmic, zero_vmac, zero_vsini,
            zero_limb_darkening_coeff) + ".fits.gz"
        reference_list.add_row(
            (int(teff), logg, MH, alpha, vmic, reference_filename_out))

    if not os.path.exists(reference_list_filename):
        lock = FileLock(reference_list_filename + ".lock")
        try:
            lock.acquire(timeout=-1)  # Don't wait
        except (LockTimeout, AlreadyLocked) as e:
            # Some other process is writing this file, do not continue
            print("Skipping", reference_list_filename, "already locked")
        else:
            try:
                ascii.write(reference_list,
                            reference_list_filename,
                            delimiter='\t',
                            overwrite=True)
                print("Written", reference_list_filename)
            finally:
                lock.release()

    if to_resolution is not None:
        if not os.path.exists(reference_grid_filename):
            lock = FileLock(reference_grid_filename + ".lock")
            try:
                lock.acquire(timeout=-1)  # Don't wait
            except (LockTimeout, AlreadyLocked) as e:
                # Some other process is computing this spectrum, do not continue
                print("Skipping", reference_grid_filename, "already locked")
            else:
                try:
                    reference_grid = None
                    complete_reference_list = Table()
                    complete_reference_list.add_column(
                        Column(name='teff', dtype=int))
                    complete_reference_list.add_column(
                        Column(name='logg', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='MH', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='alpha', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='vmic', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='vmac', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='vsini', dtype=float))
                    complete_reference_list.add_column(
                        Column(name='limb_darkening_coeff', dtype=float))
                    for teff, logg, MH, alpha, vmic in ranges:
                        # Only use the first spectra generated for each combination
                        zero_vmac = 0.0
                        zero_vsini = 0.0
                        zero_limb_darkening_coeff = 0.00
                        vmac = estimate_vmac(teff, logg, MH)
                        vsini = 1.6  # Sun
                        limb_darkening_coeff = 0.6
                        reference_filename_out = "{0}_{1:.2f}_{2:.2f}_{3:.2f}_{4:.2f}_{5:.2f}_{6:.2f}_{7:.2f}".format(
                            int(teff), logg, MH, alpha, vmic, zero_vmac,
                            zero_vsini, zero_limb_darkening_coeff) + ".fits.gz"
                        if not os.path.exists(fits_dir +
                                              reference_filename_out):
                            continue
                        complete_reference_list.add_row(
                            (int(teff), logg, MH, alpha, vmic, vmac, vsini,
                             limb_darkening_coeff))

                        # Spectra in the grid is convolved to the specified resolution for fast comparison
                        print("Quick grid:", reference_filename_out)
                        spectrum = read_spectrum(fits_dir +
                                                 reference_filename_out)

                        segments = None
                        vrad = (0, )
                        spectrum['flux'] = apply_post_fundamental_effects(spectrum['waveobs'], spectrum['flux'], segments, \
                                    macroturbulence=vmac, vsini=vsini, \
                                    limb_darkening_coeff=limb_darkening_coeff, R=to_resolution, vrad=vrad)

                        if reference_grid is None:
                            reference_grid = spectrum['flux']
                        else:
                            reference_grid = np.vstack(
                                (reference_grid, spectrum['flux']))

                    if len(ranges) == len(complete_reference_list):
                        # Generate FITS file with grid for fast comparison
                        primary_hdu = fits.PrimaryHDU(reference_grid)
                        wavelengths_hdu = fits.ImageHDU(wavelengths,
                                                        name="WAVELENGTHS")
                        params_bintable_hdu = fits.BinTableHDU(
                            complete_reference_list.as_array(), name="PARAMS")
                        fits_format = fits.HDUList([
                            primary_hdu, wavelengths_hdu, params_bintable_hdu
                        ])
                        fits_format.writeto(reference_grid_filename,
                                            overwrite=True)
                        print("Written", reference_grid_filename)
                finally:
                    lock.release()
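
Assuming the layout written above (flux grid in the primary HDU, a WAVELENGTHS image extension, and a PARAMS binary table), the convolved grid could be read back along these lines; the file name and resolution are hypothetical:

from astropy.io import fits

with fits.open('convolved_grid_80000.fits.gz') as hdul:
    flux_grid = hdul[0].data             # one row of convolved fluxes per reference point
    wavelengths = hdul['WAVELENGTHS'].data
    params = hdul['PARAMS'].data         # teff, logg, MH, alpha, vmic, vmac, vsini, ...

# Assumes more than one reference point, so flux_grid is a 2-D array.
for row, flux in zip(params, flux_grid):
    print(row['teff'], row['logg'], flux.mean())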
Beispiel #30
0
    def __parse_result(self, row):
        """
        Used to turn the catolog entries into dictionaries of information.

        Input
            ------
            row: astropy row or single entry table
                The table to convert.
        
        Output 
            ------
            data: dic [keys=("Name","Otype","ra","dec","Distance","Mag","Sptype","Redshift","Luminosity")
            """

        # Attempt to convert a row to a table (tables can be converted to arrays, but rows not)
        as_table = Table(row)

        # Extract information to single variables
        [
            main, name, ids, idngc, idm, idhd, ra, dec, otype, sptype, plx,
            redshift, mag, dist, distu, distmeth
        ] = as_table.as_array()[0]

        # Make a string of all valid names
        usenames = ""
        for i in [name, idm, idngc, idhd]:
            if i != '' and i != 0:
                usenames += i + ", "

        data = {"Name": usenames[:-2]}

        # Go through the list, check if variables have information, and add them to dictionary if
        # information is found
        if mag != None:
            data["Mag"] = "%.3f" % mag
        if redshift > 0.001:
            data["Redshift"] = "%.3f" % redshift
        if otype != None:
            data["Otype"] = otype
        else:
            data["Otype"] = "Unknown"
        if sptype != '':
            data["Sptype"] = str(sptype)

        if plx > 0:  # Distance calculations from parallax
            data["Plx"] = str(plx)
            distpc = 1000. / plx
            distly = distpc * 3.2616
            data["Distance"] = "%.2f pc, %.2f ly" % (distpc, distly)

        if redshift != None:  # Distance estimates from redshift
            data["redshift"] = redshift

        if plx > 0 and mag != None:  # Luminosity estimates
            Msun = 4.74
            absmag = mag - 5 * (np.log10(distpc) - 1)
            L = 10**(1. / 2.5 * (Msun - absmag))
            data["Luminosity"] = "%.2f" % L

        if dist > 0:
            data["Distance"] = "%.2f %s" % (dist, distu)

        # Add right ascension and declination information
        data["ra"] = "%s hrs" % ra
        data["dec"] = "%s deg" % dec
        data["celcoor"] = [utils.hourangle(ra), utils.degree(dec)]
        if "location" in dir(self):
            loc = self.location
        if "home" in dir(self):
            loc = self.home

        altaz = self.Transform.cel2altaz(data["celcoor"], loc)[0]
        data["altaz"] = altaz
        data["az"] = "%.2f deg" % altaz[0]
        data["alt"] = "%.2f deg" % altaz[1]

        # This is for a string with all of the information. Goes down the list, if a key exists, adds
        # information to the string. Useful for displaying everything easily.
        order = [
            "Name", "Otype", "ra", "dec", "az", "alt", "Mag", "Distance",
            "Sptype", "Luminosity", "Redshift"
        ]

        outstring = ''
        for key in order:
            if key in data.keys():
                outstring += key + ": " + data[key] + "\n"

        data["String"] = outstring

        return data
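
The distance and luminosity estimates above follow the usual relations: d_pc = 1000 / plx[mas], M = m - 5 (log10 d_pc - 1), and L/Lsun = 10^((Msun - M) / 2.5). A quick numerical check with Sun-like numbers:

import numpy as np

plx = 100.0     # parallax in milliarcseconds -> 10 pc
mag = 4.74      # apparent magnitude of a hypothetical Sun-like star at that distance

distpc = 1000. / plx                          # 10 pc
distly = distpc * 3.2616                      # ~32.6 light years
Msun = 4.74
absmag = mag - 5 * (np.log10(distpc) - 1)     # 4.74 at 10 pc
L = 10 ** (1. / 2.5 * (Msun - absmag))        # 1.0 solar luminosity
print("%.2f pc, %.2f ly, Mabs=%.2f, L=%.2f Lsun" % (distpc, distly, absmag, L))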
from astropy.io import fits
from astropy.table import Table
import numpy as np

e_bins = 20
o_bins = 5
e_axis = np.logspace(-1, 2, e_bins + 1)
o_axis = np.linspace(0, 3, o_bins + 1)
effarea = np.ones([e_bins, o_bins]) * 3e5

table = Table(
    [[e_axis[:-1]], [e_axis[1:]], [o_axis[:-1]], [o_axis[1:]], [effarea]],
    names=('ENERG_LO', 'ENERG_HI', 'THETA_LO', 'THETA_HI', 'EFFAREA'))

data = table.as_array()
header = fits.Header()
header['OBS_ID'] = 31415, 'Observation ID'
header['LO_THRES'] = 0.1, 'Low energy threshold [TeV]'
header['HI_THRES'] = 50, 'High energy threshold [TeV]'
header[
    'HDUDOC'] = 'https://github.com/open-gamma-ray-astro/gamma-astro-data-formats', ''
header['HDUVERS'] = '0.2', ''
header['HDUCLASS'] = 'GADF', ''
header['HDUCLAS1'] = 'RESPONSE', ''
header['HDUCLAS2'] = 'EFF_AREA', ''
header['HDUCLAS3'] = 'FULL-ENCLOSURE', ''
header['HDUCLAS4'] = 'AEFF_2D', ''

tbhdu = fits.BinTableHDU(data, header, name='EFFECTIVE AREA')
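
Assigning a tuple to a header key, as done above, sets both the value and the comment in one step. A minimal sketch:

from astropy.io import fits

header = fits.Header()
header['OBS_ID'] = 31415, 'Observation ID'     # tuple assignment: value plus comment
header['LO_THRES'] = 0.1, 'Low energy threshold [TeV]'

print(header['OBS_ID'])                        # 31415
print(header.comments['LO_THRES'])             # 'Low energy threshold [TeV]'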
Beispiel #32
0
    def makeBFLineCube(self, cfg_par):

        cubeletsDir = cfg_par['general']['cubeletsDir']
        cubeDir = cfg_par['general']['cubeDir']
        momDir = cfg_par['general']['momDir']

        if cfg_par['bestFitSel']['BFcube']['rotationID'] == True:
            modelCube = fits.open(cfg_par['bestFitSel']['BFcube']['modelCube'])
            mdC = modelCube[0].data
            rotMoM = np.zeros([mdC.shape[1], mdC.shape[2]]) * np.nan

        hdul = fits.open(cfg_par['general']['outTableName'])
        tabGen = hdul['BININFO'].data

        residuals = hdul['Residuals_' + cfg_par['gFit']['modName']].data
        ancels = hdul['Ancels' + cfg_par['gFit']['modName']].data
        rotArr = np.empty(len(ancels['BIN_ID']))

        bF = np.array(residuals['bestFit'], dtype=int)

        wave, xAxis, yAxis, pxSize, noiseBin, vorBinInfo, dataSpec = tP.openVorLineOutput(
            cfg_par, cfg_par['general']['outVorLineTableName'],
            cfg_par['general']['outVorSpectra'])

        lineInfo = tP.openLineList(cfg_par)
        lineInfoAll = lineInfo.copy()
        index = np.where(lineInfo['BFCUbe'] == 0)
        fltr = np.array(index)[0]
        lineInfo.remove_rows(list(fltr))

        lineNameStr = str(lineInfo['Name'][0])

        if '[' in lineNameStr:
            lineName = lineNameStr.replace("[", "")
            lineName = lineName.replace("]", "")
            lineName = lineName + str(int(lineInfo['Wave'][0]))
        else:
            lineName = lineNameStr + str(int(lineInfo['Wave'][0]))

        lineNameName = lineNameStr + str(int(lineInfo['Wave'][0]))

        lineNameStr = np.array([lineNameStr + str(int(lineInfo['Wave'][0]))],
                               dtype='a16')

        lineNamesStrAll = np.empty(len(lineInfoAll), dtype='a16')
        for ii in range(0, len(lineInfoAll['ID'])):
            #for ii in range(0,1):

            lineNameStrAll = str(lineInfoAll['Name'][ii])

            if '[' in lineNameStrAll:
                lineNameAll = lineNameStrAll.replace("[", "")
                lineNameAll = lineNameAll.replace("]", "")
                lineNameAll = lineNameAll + str(int(lineInfoAll['Wave'][ii]))
            else:
                lineNameAll = lineNameStrAll + str(int(
                    lineInfoAll['Wave'][ii]))

            lineNamesStrAll[ii] = str(lineNameStrAll +
                                      str(int(lineInfoAll['Wave'][ii])))

        indexLine = np.where(lineNamesStrAll == lineNameStr)[0]
        modName = 'BF'
        f = fits.open(cubeDir + 'fitCube_' + modName + '.fits')
        dd = f[0].data
        hh = f[0].header
        mom = fits.open(momDir + 'g2/mom0_tot-' + lineName + '.fits')
        mm = mom[0].data
        indexFltr = np.broadcast_to(np.isnan(mm), dd.shape)
        dd[indexFltr] = np.nan
        #lambdaMin = np.log(cfg_par['gFit']['lambdaMin'])
        #lambdaMax = np.log(cfg_par['gFit']['lambdaMax'])
        #idxMin = int(np.where(abs(wave-lambdaMin)==abs(wave-lambdaMin).min())[0])
        #idxMax = int(np.where(abs(wave-lambdaMax)==abs(wave-lambdaMax).min())[0])

        lambdaMin = np.log(cfg_par['gFit']['lambdaMin'])
        lambdaMax = np.log(cfg_par['gFit']['lambdaMax'])
        idxMin = int(
            np.where(abs(wave - lambdaMin) == abs(wave - lambdaMin).min())[0])
        idxMax = int(
            np.where(abs(wave - lambdaMax) == abs(wave - lambdaMax).min())[0])

        wave = wave[idxMin:idxMax]
        dd = dd[idxMin:idxMax, :, :]

        velRangeMin = cvP.vRadLambda(
            -cfg_par['bestFitSel']['BFcube']['velRange'][0],
            lineInfo['Wave'][0]) - lineInfo['Wave'][0]
        velRangeMax = cvP.vRadLambda(
            cfg_par['bestFitSel']['BFcube']['velRange'][1],
            lineInfo['Wave'][0]) - lineInfo['Wave'][0]

        waveMin = np.log(lineInfo['Wave'][0] - velRangeMin)
        waveMax = np.log(lineInfo['Wave'][0] + velRangeMax)

        idxMin1 = int(
            np.where(abs(wave - waveMin) == abs(wave - waveMin).min())[0])
        idxMax1 = int(
            np.where(abs(wave - waveMax) == abs(wave - waveMax).min())[0])

        wave = wave[idxMin1:idxMax1]

        waveAng = np.exp(wave)

        vel = cvP.lambdaVRad(waveAng, lineInfo['Wave'][0]) + float(
            cfg_par['general']['velsys'])

        dd = dd[idxMin1:idxMax1, :, :]
        fitCube = np.empty([dd.shape[0], dd.shape[1], dd.shape[2]])
        fitCubeMask = np.zeros([dd.shape[0], dd.shape[1], dd.shape[2]])
        fitCubeMD = np.zeros([dd.shape[0], dd.shape[1], dd.shape[2]])

        fitCubeMaskInter = np.zeros([dd.shape[0], dd.shape[1], dd.shape[2]])
        vecSumMap = np.zeros([dd.shape[1], dd.shape[2]]) * np.nan
        lenghtLineMap = np.zeros([dd.shape[1], dd.shape[2]]) * np.nan

        for i in range(0, len(ancels['BIN_ID'])):

            match_bin = np.where(tabGen['BIN_ID'] == ancels['BIN_ID'][i])[0]

            if bF[i] == 0:
                modName = 'g1'
            elif bF[i] == 1:
                modName = 'g2'

            for index in match_bin:

                if np.sum(~np.isnan(dd[:,
                                       int(tabGen['PixY'][index]),
                                       int(tabGen['PixX'][index])])) != 0:

                    result = load_modelresult(
                        cfg_par['general']['runNameDir'] + 'models/' +
                        modName + '/' + str(ancels['BIN_ID'][i]) + '_' +
                        modName + '.sav')
                    comps = result.eval_components()

                    if modName == 'g1':
                        fit = comps['g1ln' + str(indexLine[0]) + '_']
                    elif modName == 'g2':
                        fit = comps['g1ln' + str(indexLine[0]) +
                                    '_'] + comps['g2ln' + str(indexLine[0]) +
                                                 '_']

                    fitCube[:,
                            int(tabGen['PixY'][index]),
                            int(tabGen['PixX'][index])] = fit[idxMin1:idxMax1]

                    if cfg_par['bestFitSel']['BFcube']['rotationID'] == True:
                        mdSpec = mdC[:,
                                     int(tabGen['PixY'][index]),
                                     int(tabGen['PixX'][index])]

                        mdSpec[mdSpec != 0] = 1.
                        mdSpec = np.flipud(mdSpec)
                        fitCubeMD[:,
                                  int(tabGen['PixY'][index]),
                                  int(tabGen['PixX'][index])] = mdSpec
                        #centroid = ancels['centroid_'+lineName][i]
                        #width = ancels['w80_'+lineName][i]
                        fitSmall = fit[idxMin1:idxMax1]
                        idxPeak = np.nanargmax(fitSmall)
                        #print(idxPeak)
                        idxLeft = int(
                            np.where(
                                abs(fitSmall[:idxPeak] -
                                    10.) == abs(fitSmall[:idxPeak] -
                                                10.).min())[0])
                        idxRight = int(
                            np.where(
                                abs(fitSmall[idxPeak:] -
                                    10.) == abs(fitSmall[idxPeak:] -
                                                10.).min())[0]) + idxPeak

                        #print(idxLeft,idxRight)
                        #velMin = centroid-(width/2.)
                        #velMax = centroid+(width/2.)
                        #indexVelMin = int(np.where(abs(vel-velMin)==abs(vel-velMin).min())[0])
                        #indexVelMax = int(np.where(abs(vel-velMax)==abs(vel-velMax).min())[0])

                        fitMask = np.zeros(len(fit[idxMin1:idxMax1]))
                        fitMaskIntercect = np.zeros(len(fit[idxMin1:idxMax1]))
                        #fitMask[indexVelMin:indexVelMax] = 1.

                        fitMask[idxLeft:idxRight] = 1.
                        lenghtLine = np.count_nonzero(fitMask == 1.)

                        fitCubeMask[:,
                                    int(tabGen['PixY'][index]),
                                    int(tabGen['PixX'][index])] = fitMask
                        #print(fitMask,mdSpec)
                        vecCount = np.where((fitMask == 1.) & (mdSpec == 1.))
                        fitMaskIntercect[vecCount] = 1.

                        vecSum = np.count_nonzero(fitMaskIntercect == 1.)
                        fitCubeMaskInter[:,
                                         int(tabGen['PixY'][index]),
                                         int(tabGen['PixX'][index]
                                             )] = fitMaskIntercect

                        vecSumMap[int(tabGen['PixY'][index]),
                                  int(tabGen['PixX'][index])] = vecSum

                        lenghtLineMap[
                            int(tabGen['PixY'][index]),
                            int(tabGen['PixX'][index]
                                )] = lenghtLine / 100. * cfg_par['bestFitSel'][
                                    'BFcube']['rotationPercent']

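                        # rotation criterion: flag the spaxel as rotation-dominated when the
                        # number of channels shared by the line mask and the model mask exceeds
                        # rotationPercent per cent of the fitted line width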
                        if vecSum > (lenghtLine / 100. * cfg_par['bestFitSel']
                                     ['BFcube']['rotationPercent']):
                            rotArr[i] = 1.
                            rotMoM[int(tabGen['PixY'][index]),
                                   int(tabGen['PixX'][index])] = 1.

                        else:
                            rotArr[i] = 0.
                            rotMoM[int(tabGen['PixY'][index]),
                                   int(tabGen['PixX'][index])] = 0.

                else:
                    fitCube[:,
                            int(tabGen['PixY'][index]),
                            int(tabGen['PixX'][index])] = np.nan
                    fitCubeMask[:,
                                int(tabGen['PixY'][index]),
                                int(tabGen['PixX'][index])] = np.nan
                    fitCubeMD[:,
                              int(tabGen['PixY'][index]),
                              int(tabGen['PixX'][index])] = np.nan

                    fitCubeMaskInter[:,
                                     int(tabGen['PixY'][index]),
                                     int(tabGen['PixX'][index])] = np.nan
                    vecSumMap[int(tabGen['PixY'][index]),
                              int(tabGen['PixX'][index])] = np.nan
                    lenghtLineMap[int(tabGen['PixY'][index]),
                                  int(tabGen['PixX'][index])] = np.nan

        waveAng = np.exp(wave)

        header = self.makeHeader(cfg_par, lineInfo['Wave'][0], hh, waveAng)

        outCubelet = cubeletsDir + str(lineNameName) + '_BF.fits'
        outCubeletMask = cubeletsDir + str(lineNameName) + '_BFMask.fits'
        outCubeletMaskfl = cubeletsDir + str(
            lineNameName) + '_BFMaskInter.fits'
        outCubeletMaskMD = cubeletsDir + str(lineNameName) + '_BFMD.fits'

        fits.writeto(outCubelet,
                     np.flip(fitCube, axis=0),
                     header,
                     overwrite=True)
        fits.writeto(outCubeletMask,
                     np.flip(fitCubeMask, axis=0),
                     header,
                     overwrite=True)
        fits.writeto(outCubeletMaskfl,
                     np.flip(fitCubeMaskInter, axis=0),
                     header,
                     overwrite=True)
        fits.writeto(outCubeletMaskMD,
                     np.flip(fitCubeMD, axis=0),
                     header,
                     overwrite=True)

        if cfg_par['bestFitSel']['BFcube']['rotationID'] == True:

            outMomRot = momDir + str(lineNameName) + '_RotMom.fits'
            outMomSum = momDir + str(lineNameName) + '_SumInter.fits'
            outMomLength = momDir + str(lineNameName) + '_LengthLine.fits'
            outMomDiff = momDir + str(lineNameName) + '_diffInter.fits'

            # drop the spectral-axis keywords so the header describes a 2D map
            for key in ('CUNIT3', 'CTYPE3', 'CDELT3',
                        'CRVAL3', 'CRPIX3', 'NAXIS3'):
                if key in header:
                    del header[key]
            header['WCSAXES'] = 2
            header['SPECSYS'] = 'topocent'
            header['BUNIT'] = 'Jy/beam'

            fits.writeto(outMomRot, rotMoM, header, overwrite=True)
            fits.writeto(outMomSum, vecSumMap, header, overwrite=True)
            fits.writeto(outMomLength, lenghtLineMap, header, overwrite=True)
            fits.writeto(outMomDiff,
                         vecSumMap - lenghtLineMap,
                         header,
                         overwrite=True)

            t = Table(ancels)

            if 'RotMod' not in ancels.dtype.names:
                t.add_column(Column(rotArr, name='RotMod'))
            else:
                t.replace_column('RotMod', Column(rotArr, name='RotMod'))

            try:
                # indexing by name raises KeyError if the extension does not exist yet
                tt = Table(hdul['AncelsBF'].data)
                hdul['AncelsBF'] = fits.BinTableHDU(t.as_array(),
                                                    name='AncelsBF')
            except KeyError:
                tt = fits.BinTableHDU.from_columns(t.as_array(),
                                                   name='AncelsBF')
                hdul.append(tt)

        hdul.writeto(cfg_par['general']['outTableName'], overwrite=True)

        return
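The closing block above updates the `RotMod` column of the ancillary table and writes it back by rebuilding the `AncelsBF` extension from `t.as_array()`: the extension is replaced if it already exists, otherwise a new `BinTableHDU` is appended. Below is a minimal, self-contained sketch of that replace-or-append pattern; the file name and the columns are hypothetical stand-ins, not taken from the original code.

import numpy as np
from astropy.io import fits
from astropy.table import Table, Column

# Illustrative sketch only: stand-in table for the fit results
t = Table()
t.add_column(Column(np.arange(5), name='BIN_ID'))
t.add_column(Column(np.zeros(5), name='RotMod'))

hdul = fits.HDUList([fits.PrimaryHDU()])

try:
    # assigning by extension name raises KeyError if 'AncelsBF' is not there yet
    hdul['AncelsBF'] = fits.BinTableHDU(t.as_array(), name='AncelsBF')
except KeyError:
    # first time through: build the extension from the structured array and append it
    hdul.append(fits.BinTableHDU(t.as_array(), name='AncelsBF'))

hdul.writeto('example_table.fits', overwrite=True)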
def importing_data(password, user, Obs_date, Special_id1, Vertex1, Vertex2,
                   Vertex3, Vertex4, Instrument, Mode, lambdaMu, eta, pv,
                   relative_reflectance):
    #check if any requested wavelength is too big
    try:
        for wav1 in lambdaMu:
            if wav1 > 29.9999:
                raise ValueError("Required wavelength", wav1,
                                 "is too big. Max 29.9999 micron")
    except TypeError:
        # lambdaMu is a single scalar wavelength, not a sequence
        if lambdaMu > 29.9999:
            raise ValueError("Required wavelength", lambdaMu,
                             "is too big. Max 29.9999 micron")
    # Open database connection
    db = pymysql.connect(host="localhost", user=user, password=password)
    # prepare a cursor object using cursor() method and go to the right database
    cursor = db.cursor()
    cursor.execute("USE ISPY")
    #import url and make a table
    url, z, TYPE = Url_for_ISPY_SkyCoord.ISPY_ephemeris_inSkyCoord(
        'JWST', Obs_date, Special_id1, Vertex1, Vertex2, Vertex3, Vertex4)
    #delete the old data
    sql3 = "DROP TABLE IF EXISTS {0} ".format(Special_id1)
    try:
        # Execute the SQL command
        cursor.execute(sql3)
        # Commit your changes in the database
        db.commit()
    except:
        raise ValueError(
            "Error: unable to delete data in the old table to update it")
    #get table name and create a table for this Specific ID
    table_name, brightness = Setup_Table_in_MySQL_to_Fill_in_Data.makeMySQLtable(
        Special_id1, password, user, Obs_date, Vertex1, Vertex2, Vertex3,
        Vertex4, Instrument, Mode, lambdaMu)
    Special_id = table_name
    #update the fact that the data has been deleted
    time1 = strftime("%Y-%m-%d %H:%M:%S", gmtime()) + ' UTC'
    sql11 = "UPDATE INPUT_Table SET Status='NULL',Number_of_Asteroids_Detected=0, Time_Updated_UTC='{0}' where OBS_id='{1}'".format(
        time1, Special_id)
    try:
        # Execute the SQL command
        cursor.execute(sql11)
        # Commit your changes in the database
        db.commit()
    except:
        # Rollback in case there is any error
        db.rollback()
        raise ValueError("Unable to update the status for OBS_id:" +
                         Special_id + " in the INPUT_Table.")

    #check if there are any data on the website
    if z is None:
        #No results
        #update Status in the input table to know if that line has been run already
        time1 = strftime("%Y-%m-%d %H:%M:%S", gmtime()) + ' UTC'
        sql11 = "UPDATE INPUT_Table SET Status='UPDATED',Number_of_Asteroids_Detected=0, Time_Updated_UTC='{0}' where OBS_id='{1}'".format(
            time1, Special_id)
        #print (sql11)
        try:
            # Execute the SQL command
            cursor.execute(sql11)
            # Commit your changes in the database
            db.commit()
        except:
            # Rollback in case there is any error
            db.rollback()
            raise ValueError("Unable to update the status for OBS_id:" +
                             Special_id + " in the INPUT_Table.")

    else:
        #get the table and the tablelines
        distant, tablelines = Create_ISPY_Table.make_table(
            z, 'SPK-ID', 'EXPLANATION', 1, 1)
        #convert the table into a structured array of rows to insert into the database
        f = Table.as_array(distant)

        #collect the JPL_SPKID values in a list
        JPL_SPKID = []
        #loop over all the rows in the table from ISPY
        for x in range(0, len(f)):
            JPL_SPKID.append(f[x][0])

            #table columns
            tab_col = [
                'JPL_SPKID', 'IAU_Number', 'Name_designation', 'RA', 'DEC1',
                'Amag', 'dRAcosD', 'dDEC_by_dt', 'CntDst', 'PsAng', 'Data_Arc',
                'Nobs', 'SMAA_3sig', 'SMIA_3sig', 'Theta', 'Pixel_x',
                'Pixel_y', 'Last_updated', table_name, 'OBS_id', Special_id,
                'H', 'G', 'alpha', 'r', 'delta', 'eta', 'pv',
                'Relative_reflectance'
            ]

            #add Horizons data and the brightness
            asteroid = str(f[x][2]).strip("(")
            asteroid = str(asteroid).strip(")")
            horizons, V = Calculate_Brightness.calculate_brightness(
                lambdaMu, eta, pv, Obs_date, asteroid, relative_reflectance)

            #get the reflectance brightness
            reflectance = Ref_Brightness.Reflectance(relative_reflectance, V,
                                                     lambdaMu)

            #add brightness values together
            Brightness = []
            try:
                if horizons[8] == []:
                    Brightness = 'empty'
                else:
                    for xx in range(0, len(reflectance)):
                        Brightness.append(horizons[8][xx] + reflectance[xx])

            except TypeError:
                # reflectance is a single scalar value, not a sequence
                if horizons[8] == []:
                    Brightness = 'empty'
                else:
                    Brightness = horizons[8] + reflectance

            #input new data
            sql1 = "INSERT INTO {0}(OBS_id,JPL_SPKID,Name_designation,\
                              RA ,\
                              DEC1) \
                   VALUES ('{2}',{1[0]},'{1[2]}','{1[3]}','{1[4]}')".format(
                tab_col[18], f[x], tab_col[20])

            try:
                # Execute the SQL command
                cursor.execute(sql1)
                # Commit your changes in the database
                db.commit()
            except:
                # Rollback in case there is any error
                db.rollback()
                raise ValueError("Unable to import into table OBS_id:" +
                                 Special_id)
            #loop over all the columns in f (add data from ISPY)
            for y in range(0, len(tab_col) - 14):
                if y == 0 or y == 2 or y == 3 or y == 4:
                    sql1 = 0
                elif ma.getmask(f[x][y]) == True:
                    # Prepare SQL query to INSERT a record into the database.
                    sql1 = 0
                elif f[x][y] == 'NA':
                    # Prepare SQL query to INSERT a record into the database.
                    sql1 = 0
                else:
                    sql1 = "UPDATE {0} SET {1}='{2}'\
                    where {3}={4}".format(tab_col[18], tab_col[y], f[x][y],
                                          tab_col[0], f[x][0])
                    #print (x,y,f[x][y])
                    try:
                        # Execute the SQL command
                        cursor.execute(sql1)
                        # Commit your changes in the database
                        db.commit()
                    except:
                        # Rollback in case there is any error
                        db.rollback()
                        raise ValueError(
                            "Unable to import into table OBS_id:" + Special_id)

            #loop over all the columns in horizons  (add data from Horizons and brightness)
            for yy in range(21, 29):
                sql3 = "UPDATE {0} SET {1}='{2}'\
                    where {3}={4}".format(tab_col[18], tab_col[yy],
                                          horizons[yy - 21], tab_col[0],
                                          f[x][0])
                #print (sql3)
                try:
                    # Execute the SQL command
                    cursor.execute(sql3)
                    # Commit your changes in the database
                    db.commit()
                except:
                    # Rollback in case there is any error
                    db.rollback()
                    raise ValueError("Unable to import into table OBS_id:" +
                                     Special_id)
            #print (horizons)
            try:
                for lam in range(0, len(lambdaMu)):
                    if horizons[8] == []:
                        sql4 = "Not possible to calculate brightness"

                    else:
                        #add values for brightness
                        sql4 = "UPDATE {0} SET `{1}`='{2}'\
                        where {3}={4}".format(tab_col[18], brightness[lam],
                                              Brightness[lam], tab_col[0],
                                              f[x][0])
                        #print (sql4)
                        try:
                            # Execute the SQL command
                            cursor.execute(sql4)
                            # Commit your changes in the database
                            db.commit()
                        except:
                            # Rollback in case there is any error
                            db.rollback()
                            raise ValueError(
                                "Unable to import into table OBS_id:" +
                                Special_id)
            except TypeError:
                # lambdaMu is a single scalar wavelength, not a sequence
                if horizons[8] == []:
                    sql4 = "Not possible to calculate brightness"
                else:
                    #add values for brightness
                    sql4 = "UPDATE {0} SET `{1}`='{2}'\
                        where {3}={4}".format(tab_col[18], brightness[0],
                                              Brightness, tab_col[0], f[x][0])
                    #print (sql4)
                    try:
                        # Execute the SQL command
                        cursor.execute(sql4)
                        # Commit your changes in the database
                        db.commit()
                    except:
                        # Rollback in case there is any error
                        db.rollback()
                        raise ValueError(
                            "Unable to import into table OBS_id:" + Special_id)

        #check if the number of rows in f and in MySQL is the same (so if all the data has been imported)
        sql2 = "SELECT COUNT(*) FROM {0}".format(tab_col[18])

        try:
            # Execute the SQL command
            cursor.execute(sql2)
            # Fetch all the rows in a list of lists.
            result2 = cursor.fetchone()
            # Now check fetched result
            if result2[0] == len(f):
                print('For OBS ID ', Special_id,
                      ': All the data have been recorded into the database.')

                #update Status in the input table to know if that line has been run already
                time1 = strftime("%Y-%m-%d %H:%M:%S", gmtime()) + ' UTC'
                sql11 = "UPDATE INPUT_Table SET Status='UPDATED',Number_of_Asteroids_Detected={1}, Time_Updated_UTC='{2}' where OBS_id='{0}'".format(
                    tab_col[18], result2[0], time1)
                #print (sql11)
                try:
                    # Execute the SQL command
                    cursor.execute(sql11)
                    # Commit your changes in the database
                    db.commit()
                except:
                    # Rollback in case there is any error
                    db.rollback()
                    raise ValueError(
                        "Unable to update the status for OBS_id:" +
                        Special_id + " in the INPUT_Table.")

            else:
                print(
                    'For OBS ID ', Special_id,
                    ': The number of asteroids in the database is not the same as from ISPY'
                )

        except:
            raise ValueError(
                'For OBS ID ', Special_id,
                ": Error: unable to fetch data to check if all the data have been recorded into the database."
            )
    # disconnect from server
    db.close()
    return Special_id1
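In `importing_data` the ephemeris table is converted with `Table.as_array()` and then walked row by row, with masked cells detected via `ma.getmask` before anything is written to MySQL. The following minimal sketch illustrates that walk over a masked table; the column names and values are made up for illustration and are not the real ISPY columns.

import numpy.ma as ma
from astropy.table import Table, MaskedColumn

# Illustrative sketch only: stand-in for the ISPY ephemeris table
distant = Table()
distant['SPK_ID'] = [2000001, 2000002]
distant['RA'] = MaskedColumn([10.5, 11.2], mask=[False, True])

f = distant.as_array()  # structured array; masked because the table holds a MaskedColumn

for x in range(len(f)):
    for name in f.dtype.names:
        # skip masked cells before inserting, as the import loop above does
        if ma.getmask(f[x][name]) == True:  # noqa
            continue
        print(name, f[x][name])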
Beispiel #34
0
    def aper_summary(self, gal_only=False, output=False):
        """Get all the stellar mass, age, metallicity, and velocity dispersion profiles.

        Parameters
        ----------
        gal_only : bool, optional
            Only provide a summary for the whole galaxy. Default: False.
        output : bool, optional
            Return the `aper_sum` table when True. Default: False.

        Configuration Parameters
        ------------------------
        Can be found in `self.config`:
        subpix : int, optional
            Subpixel sampling factor. Default is 5.

        """
        aper_sum = Table()

        # add the radial bin columns
        aper_sum.add_column(Column(data=self.rad_inn, name='rad_inn'))
        aper_sum.add_column(Column(data=self.rad_out, name='rad_out'))
        aper_sum.add_column(Column(data=self.rad_mid, name='rad_mid'))

        # aperture mass profiles, if available, added to the aper_sum table
        if 'map_star_rho_insitu_{}'.format(
                self.proj
        ) in self.hdf5_values and 'map_star_rho_exsitu_{}'.format(
                self.proj) in self.hdf5_values:
            self.maper('gal')

            aper_sum.add_column(Column(data=self.maper_gal, name='maper_gal'))

            if not gal_only:
                self.maper('ins')
                self.maper('exs')

                aper_sum.add_column(
                    Column(data=self.maper_ins, name='maper_ins'))
                aper_sum.add_column(
                    Column(data=self.maper_exs, name='maper_exs'))

        # aperture age profiles, if available, added to the aper_sum table
        if 'map_star_age_insitu_{}'.format(
                self.proj
        ) in self.hdf5_values and 'map_star_age_exsitu_{}'.format(
                self.proj) in self.hdf5_values:
            self.aprof('age', 'gal', return_mass=True)

            aper_sum.add_column(
                Column(data=self.age_prof_gal['prof_w'], name='age_gal_w'))
            aper_sum.add_column(
                Column(data=self.age_prof_gal['prof'], name='age_gal'))
            aper_sum.add_column(
                Column(data=self.age_prof_gal['flag'], name='age_gal_flag'))
            aper_sum.add_column(
                Column(data=self.age_prof_gal['mass'], name='mprof_gal'))

            if not gal_only:
                self.aprof('age', 'ins', return_mass=True)
                self.aprof('age', 'exs', return_mass=True)

                aper_sum.add_column(
                    Column(data=self.age_prof_ins['prof_w'], name='age_ins_w'))
                aper_sum.add_column(
                    Column(data=self.age_prof_ins['prof'], name='age_ins'))
                aper_sum.add_column(
                    Column(data=self.age_prof_ins['flag'],
                           name='age_ins_flag'))
                aper_sum.add_column(
                    Column(data=self.age_prof_ins['mass'], name='mprof_ins'))
                aper_sum.add_column(
                    Column(data=self.age_prof_exs['prof_w'], name='age_exs_w'))
                aper_sum.add_column(
                    Column(data=self.age_prof_exs['prof'], name='age_exs'))
                aper_sum.add_column(
                    Column(data=self.age_prof_exs['flag'],
                           name='age_exs_flag'))
                aper_sum.add_column(
                    Column(data=self.age_prof_exs['mass'], name='mprof_exs'))

        # Aperture metallicity profiles, if available, added to the aper_sum table
        if 'map_star_metallicity_insitu_{}'.format(
                self.proj
        ) in self.hdf5_values and 'map_star_metallicity_exsitu_{}'.format(
                self.proj) in self.hdf5_values:
            self.aprof('met', 'gal')

            aper_sum.add_column(
                Column(data=self.met_prof_gal['prof_w'], name='met_gal_w'))
            aper_sum.add_column(
                Column(data=self.met_prof_gal['prof'], name='met_gal'))
            aper_sum.add_column(
                Column(data=self.met_prof_gal['flag'], name='met_gal_flag'))

            if not gal_only:
                self.aprof('met', 'ins')
                self.aprof('met', 'exs')

                aper_sum.add_column(
                    Column(data=self.met_prof_ins['prof_w'], name='met_ins_w'))
                aper_sum.add_column(
                    Column(data=self.met_prof_ins['prof'], name='met_ins'))
                aper_sum.add_column(
                    Column(data=self.met_prof_ins['flag'],
                           name='met_ins_flag'))
                aper_sum.add_column(
                    Column(data=self.met_prof_exs['prof_w'], name='met_exs_w'))
                aper_sum.add_column(
                    Column(data=self.met_prof_exs['prof'], name='met_exs'))
                aper_sum.add_column(
                    Column(data=self.met_prof_exs['flag'],
                           name='met_exs_flag'))

        # Aperture velocity dispersion profiles, if available, added to the aper_sum table
        if 'map_star_sigma_insitu_{}'.format(
                self.proj
        ) in self.hdf5_values and 'map_star_sigma_exsitu_{}'.format(
                self.proj) in self.hdf5_values and 'map_star_sigma_{}'.format(
                    self.proj) in self.hdf5_values:
            self.aprof('sigma', 'gal')

            aper_sum.add_column(
                Column(data=self.sigma_prof_gal['prof_w'], name='sigma_gal_w'))
            aper_sum.add_column(
                Column(data=self.sigma_prof_gal['prof'], name='sigma_gal'))
            aper_sum.add_column(
                Column(data=self.sigma_prof_gal['flag'],
                       name='sigma_gal_flag'))

            if not gal_only:
                self.aprof('sigma', 'ins')
                self.aprof('sigma', 'exs')

                aper_sum.add_column(
                    Column(data=self.sigma_prof_ins['prof_w'],
                           name='sigma_ins_w'))
                aper_sum.add_column(
                    Column(data=self.sigma_prof_ins['prof'], name='sigma_ins'))
                aper_sum.add_column(
                    Column(data=self.sigma_prof_ins['flag'],
                           name='sigma_ins_flag'))
                aper_sum.add_column(
                    Column(data=self.sigma_prof_exs['prof_w'],
                           name='sigma_exs_w'))
                aper_sum.add_column(
                    Column(data=self.sigma_prof_exs['prof'], name='sigma_exs'))
                aper_sum.add_column(
                    Column(data=self.sigma_prof_exs['flag'],
                           name='sigma_exs_flag'))

        setattr(self, 'aper_sum', aper_sum.as_array())

        if output:
            return aper_sum
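The method above accumulates the profiles as `Column` objects in an astropy `Table` and then stores `aper_sum.as_array()` on the object, so downstream code sees a plain structured numpy array rather than a `Table`. A minimal sketch of that build-then-convert pattern follows; the bin edges and profile values are invented for illustration.

import numpy as np
from astropy.table import Table, Column

# Illustrative sketch only: fake radial bins and one mass profile
rad_inn = np.array([0.0, 1.0, 2.0])
rad_out = np.array([1.0, 2.0, 3.0])
maper_gal = np.array([1e9, 5e9, 8e9])

aper_sum = Table()
aper_sum.add_column(Column(data=rad_inn, name='rad_inn'))
aper_sum.add_column(Column(data=rad_out, name='rad_out'))
aper_sum.add_column(Column(data=maper_gal, name='maper_gal'))

# convert to a structured ndarray, as aper_summary stores on the object
summary = aper_sum.as_array()
print(summary.dtype.names)      # ('rad_inn', 'rad_out', 'maper_gal')
print(summary['maper_gal'][0])  # field access works like a record array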
Beispiel #35
0
    print('Excluding columns not needed for proseco')
    excludes = ['PLX', 'PLX_ERR', 'PLX_CATID',
                'ACQQ1', 'ACQQ2', 'ACQQ3', 'ACQQ4', 'ACQQ5', 'ACQQ6',
                'XREF_ID1', 'XREF_ID2', 'XREF_ID3', 'XREF_ID4', 'XREF_ID5',
                'RSV4', 'RSV5', 'RSV6',
                'POS_CATID', 'PM_CATID',
                'MAG', 'MAG_ERR', 'MAG_BAND', 'MAG_CATID',
                'COLOR1_ERR', 'C1_CATID',  # Keep color1, 2, 3
                'COLOR2_ERR', 'C2_CATID',
                'RSV2',
                'VAR_CATID']

    names = [name for name in stars.dtype.names if name not in excludes]
    print('Dtype before excluding:\n', stars.dtype)
    stars = Table([stars[name] for name in names], names=names, copy=False)
    stars = stars.as_array()
    print('Dtype after excluding:\n', stars.dtype)

print('Sorting on Dec and re-ordering')
idx = np.argsort(stars['DEC'])
stars = stars.take(idx)

print('Creating miniagasc.h5 file')
rootname = 'proseco_agasc' if args.proseco else 'miniagasc'
filename = '{}_{}.h5'.format(rootname, args.version)

table_desc, bo = tables.descr_from_dtype(stars.dtype)
minih5 = tables.open_file(filename, mode='w')
minitbl = minih5.create_table('/', 'data', table_desc,
                              title='AGASC {}'.format(num_version))
print('Appending stars to {} file'.format(filename))