Example #1
File: files.py Project: esheldon/espy
    def collate_one(self, data0, shnum, psfnum, ccd):
        from esutil.numpy_util import match
        if self['verbose']:
            print '    collating with catalog'
        cat=read_cat(run=self['run'],
                     shnum=shnum,
                     psfnum=psfnum,
                     ccd=ccd,
                     verbose=self['verbose'])

        extra=[('mag_auto_r','f8'),
               ('shnum','i2'),
               ('psfnum','i2'),
               ('ccd','i2'),
               ('sratio','f8')]
        dt=data0.dtype.descr + extra

        data=zeros(data0.size, dtype=dt)
        for n in data0.dtype.names:
            data[n] = data0[n]

        mcat,mshear=match(cat['id'], data0['id'])
        if mshear.size != data0.size:
            raise ValueError("not all matched!")

        data['mag_auto_r'][mshear] = cat['mag_auto_r'][mcat]
        data['shnum'] = shnum
        data['psfnum'] = psfnum
        data['ccd'] = ccd
        data['sratio'] = sqrt(1./data0['s2'])
        return data
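All of the examples in this listing hinge on esutil.numpy_util.match, which takes two id arrays and returns a pair of index arrays such that the indexed ids agree element by element (the assertions in Examples #4 and #5 rely on exactly this property). A minimal sketch of that contract, with id values invented for illustration:

import numpy as np
from esutil.numpy_util import match

# two id arrays with a partial overlap (values invented for illustration)
cat_ids = np.array([10, 20, 30, 40, 50])
data_ids = np.array([50, 20, 70])

m1, m2 = match(cat_ids, data_ids)

# m1 indexes cat_ids, m2 indexes data_ids, and the matched ids agree pairwise;
# this is the same property the assertions in Examples #4 and #5 check
assert np.all(cat_ids[m1] == data_ids[m2])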
Example #2
def read_exposure_data(dir,exposurename):

    out_dtype=[('ccd','i1'),
               ('x','f4'),('y','f4'),
               ('psf_flags','i4'),
               ('e1','f4'),('e2','f4'),
               ('e1interp','f4'),('e2interp','f4')]

    datalist=[]
    for ccd in xrange(1,1+62):
        psf_file = path_join(dir,'%s_%02d_psf.fits' % (exposurename,ccd))
        psf=esutil.io.read(psf_file)
        psfe1 = psf['shapelets'][:,3]*sqrt(2)
        psfe2 = -psf['shapelets'][:,4]*sqrt(2)

        shear_file = path_join(dir,'%s_%02d_shear.fits' % (exposurename,ccd))
        shear=esutil.io.read(shear_file)
        e1interp = shear['interp_psf_coeffs'][:,3]*sqrt(2)
        e2interp = -shear['interp_psf_coeffs'][:,4]*sqrt(2)

        mpsf,mshear=numpy_util.match(psf['id'], shear['id'])

        tdata = numpy.zeros(mpsf.size, dtype=out_dtype)

        tdata['ccd'] = ccd
        tdata['x'] = psf['x'][mpsf]
        tdata['y'] = psf['y'][mpsf]
        tdata['e1'] = psfe1[mpsf]
        tdata['e2'] = psfe2[mpsf]

        tdata['psf_flags'] = psf['psf_flags'][mpsf]

        tdata['e1interp'] = e1interp[mshear]
        tdata['e2interp'] = e2interp[mshear]

        datalist.append(tdata)


    data = numpy_util.combine_arrlist(datalist)
    return data
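Example #2 stacks the per-CCD arrays with numpy_util.combine_arrlist. Since every tdata shares out_dtype, a plain-numpy concatenation illustrates the same stacking step; this is only a sketch of the shape of the operation, not the helper's implementation:

import numpy as np

# trimmed-down dtype standing in for out_dtype above
out_dtype = [('ccd', 'i1'), ('x', 'f4'), ('y', 'f4')]

# stand-ins for the per-CCD pieces built in the loop
datalist = [np.zeros(3, dtype=out_dtype), np.zeros(5, dtype=out_dtype)]

# every piece shares the dtype, so simple concatenation yields one structured array
data = np.concatenate(datalist)
assert data.size == sum(t.size for t in datalist)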
Example #3
File: scat.py Project: esheldon/espy
    def match(self, tilename):
        """
        match and write the output file
        """
        from esutil import numpy_util as nu

        scat_data = read_orig_scat(self.scat_name, tilename)

        print("matching")
        mdg, msc = nu.match(scat_data["coadd_objects_id"], self.ids)
        print("    matched: %d/%d" % (mdg.size, scat_data.size))

        if mdg.size == 0:
            print("    skipping")
            return

        scat_data = scat_data[mdg]
        add_dt = [("scinv_flags", "i4"), ("scinv", "f8", self.nz)]
        newdata = nu.add_fields(scat_data, add_dt)
        newdata["scinv"] = self.data["scinv"][msc, :]
        newdata["scinv_flags"] = self.data["flags"][msc]

        self._write_data(newdata, tilename)
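Example #3 widens the structured array with numpy_util.add_fields before filling the new 'scinv' columns. Example #1 achieves the same effect by hand (extend the dtype, copy the old fields), and a minimal sketch of that manual pattern, with placeholder fields and 20 standing in for self.nz, looks like this:

import numpy as np

def add_fields_sketch(arr, extra_descr):
    """Return a copy of arr with extra zero-filled fields appended
    (manual equivalent of the add_fields call above, not esutil's code)."""
    out = np.zeros(arr.size, dtype=arr.dtype.descr + extra_descr)
    for name in arr.dtype.names:
        out[name] = arr[name]
    return out

# placeholder input; 20 stands in for self.nz
scat = np.zeros(4, dtype=[('coadd_objects_id', 'i8'), ('ra', 'f8')])
newdata = add_fields_sketch(scat, [('scinv_flags', 'i4'), ('scinv', 'f8', 20)])
assert newdata['scinv'].shape == (4, 20)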
Example #4
def make_im3shape(config, info):
    """Make the im3shape catalog file
    """

    # Setup the dtype:
    dt = np.dtype([
        ('coadd_objects_id', 'i8'),
        ('e_1', 'f8'),
        ('e_2', 'f8'),
        ('nbc_m', 'f8'),
        ('nbc_c1', 'f8'),
        ('nbc_c2', 'f8'),
        ('w', 'f8'),
        ('error_flag', 'i4'),
        ('info_flag', 'i4'),
        ('snr_w', 'f4'),
        ('snr_r', 'f4'),
        ('flux_r', 'f4'),
        ('radius', 'f4'),
        ('is_bulge', 'i4'),
        ('mean_rgpp_rp', 'f4'),
        ('mean_psf_e1', 'f8'),
        ('mean_psf_e2', 'f8'),
        ('mean_psf_fwhm', 'f4'),
        ('ra_shift', 'f4'),
        ('dec_shift', 'f4'),
        ('chi2', 'f4'),
        ('likelihood', 'f4'),
        ('stamp_size', 'i4'),
        ('n_exposure', 'i4') ])

    # Read the source catalog
    imcat = fitsio.read(config['im3shape'])

    # im3shape objects are sorted, but some of them are not in the info catalog.
    im, dim = numpy_util.match(imcat['coadd_objects_id'], info['COADD_OBJECTS_ID'])
    #assert np.all(imcat['coadd_objects_id'] == sorted(imcat['coadd_objects_id']))

    # Set reasonable defaults for float fields:
    data = np.zeros(len(info), dtype=dt)
    for col, t in dt.descr:
        if 'f' in t:
            data[col] = -9999

    # The coadd_objects_id column can match for all rows
    data['coadd_objects_id'] = info['COADD_OBJECTS_ID']

    # Check that the overlap with imcat is correct
    assert np.all(data['coadd_objects_id'][dim] == imcat['coadd_objects_id'][im])

    # Default error_flag is NO_ATTEMPT
    data['error_flag'] = 2**30
    data['info_flag'] = 2**25

    print 'unique error_flags in original catalog = ',np.unique(imcat['error_flag'])
    print 'unique error_flags that match info = ',np.unique(imcat['error_flag'][im])

    # Copy in the columns from the source catalog that keep the same name:
    for col in ['nbc_m', 'nbc_c1', 'nbc_c2', 'w', 'error_flag', 'info_flag',
                'radius', 'mean_rgpp_rp', 'mean_psf_fwhm', 'likelihood',
                'stamp_size', 'n_exposure']:
        data[col][dim] = imcat[col][im]

    print 'unique error_flags in final = ',np.unique(data['error_flag'][dim])

    # Some get a new name:
    data['e_1'][dim] = imcat['e1'][im]
    data['e_2'][dim] = imcat['e2'][im]
    data['snr_w'][dim] = imcat['snr'][im]
    data['snr_r'][dim] = imcat['round_snr'][im]
    data['mean_psf_e1'][dim] = imcat['mean_psf_e1_sky'][im]
    data['mean_psf_e2'][dim] = imcat['mean_psf_e2_sky'][im]
    data['ra_shift'][dim] = imcat['ra_as'][im]
    data['dec_shift'][dim] = imcat['dec_as'][im]
    data['chi2'][dim] = imcat['chi2_pixel'][im]

    # Do a calculation to get the flux from separate bulge/disc fluxes.
    data['is_bulge'][dim] = imcat['bulge_flux'][im] != 0.
    # Only one is non-zero:
    bulge = imcat['bulge_flux'][im] != 0.
    disc = imcat['disc_flux'][im] != 0.
    print 'sum isbulge = ',np.sum(data['is_bulge'][dim])
    print 'sum bulge_flux > 0. = ',np.sum(imcat['bulge_flux'][im] != 0.)
    print 'sum disc_flux > 0. = ',np.sum(imcat['disc_flux'][im] != 0.)
    assert np.all((imcat['bulge_flux'][im] != 0) != (imcat['disc_flux'][im] != 0))
    data['flux_r'][dim] = imcat['mean_flux'][im] * (imcat['bulge_flux'][im]+imcat['disc_flux'][im])

    # clip the weights
    data['w'][dim] = np.clip(data['w'][dim], 0.0, 0.24**(-2.0))

    # unblind
    for tag in ['e_1','e_2']:
        data[tag][dim] /= unblind.get_factor()
    
    # Before writing out, make all the names upper case
    data.dtype.names = tuple([name.upper() for name in data.dtype.names])

    fitsio.write(config['release_im3shape'],data,clobber=True)
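The backbone of Examples #4, #5, #7 and #8 is one scatter pattern: allocate one output row per info entry, seed defaults, then write the matched subset through the index pair that match returns. A stripped-down sketch of just that pattern, with invented ids and a two-column dtype:

import numpy as np
from esutil.numpy_util import match

info_ids  = np.array([1, 2, 3, 4, 5, 6], dtype='i8')   # full info catalog (invented ids)
imcat_ids = np.array([2, 5, 6], dtype='i8')             # subset with shape measurements
imcat_e1  = np.array([0.01, -0.02, 0.03])

im, dim = match(imcat_ids, info_ids)

data = np.zeros(info_ids.size, dtype=[('coadd_objects_id', 'i8'), ('e_1', 'f8')])
data['e_1'] = -9999                      # default for rows with no match
data['coadd_objects_id'] = info_ids
data['e_1'][dim] = imcat_e1[im]          # scatter the matched measurements

assert np.all(data['coadd_objects_id'][dim] == imcat_ids[im])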
Example #5
def make_ngmix(config, info):
    """Make the ngmix catalog file
    """

    # Setup the dtype:
    dt = np.dtype([
        ('coadd_objects_id', 'i8'),
        ('e_1', 'f8'),
        ('e_2', 'f8'),
        ('sens_avg', 'f8'),
        ('w', 'f8'),
        ('e_cov_1_1', 'f8'),
        ('e_cov_1_2', 'f8'),
        ('e_cov_2_1', 'f8'),
        ('e_cov_2_2', 'f8'),
        ('error_flag', 'i4'),
        ('snr_w', 'f4'),
        ('snr_r', 'f4'),
        ('flux_i', 'f4'),
        ('mag_i', 'f4'),
        ('t', 'f4'),
        ('t_err', 'f4'),
        ('t_r', 'f4'),
        ('snr_t', 'f4'),
        ('snr_t_r', 'f4'),
        ('log10_sb_i', 'f4'),
        ('mean_psf_e1', 'f8'),
        ('mean_psf_e2', 'f8'),
        ('mean_psf_t', 'f4'),
        ('sens_1', 'f4'),
        ('sens_2', 'f4'),
        ('arate', 'f4'),
        ('stamp_size', 'i4'),
        ('mask_frac', 'f4') ])

    # Read the source catalogs
    ngcat = fitsio.read(config['ngmix'])

    # ngmix catalogs are not sorted, so we need to use the match function.
    # Also, ngmix has some objects that are not in the info catalog.
    ng, dng = numpy_util.match(ngcat['coadd_objects_id'], info['COADD_OBJECTS_ID'])

    # Set reasonable defaults for float fields:
    data = np.zeros(len(info), dtype=dt)
    for col, t in dt.descr:
        if 'f' in t:
            data[col] = -9999

    # The coadd_objects_id column can match for all rows
    data['coadd_objects_id'] = info['COADD_OBJECTS_ID']

    # Check that the overlap with ngcat is correct
    assert np.all(data['coadd_objects_id'][dng] == ngcat['coadd_objects_id'][ng])

    # Copy in the columns from the source catalog that keep the same name:
    for col in ['mask_frac']:
        data[col][dng] = ngcat[col][ng]

    # Some columns just need to drop the 'exp_' prefix:
    for col in ['e_1', 'e_2', 'flux_i', 'mag_i', 'arate', 't', 't_err',
                'e_cov_1_1', 'e_cov_1_2', 'e_cov_2_2']:
        data[col][dng] = ngcat['exp_' + col][ng]

    # Some need to be renamed:
    data['snr_w'][dng] = ngcat['exp_s2n_w'][ng]
    data['snr_t'][dng] = ngcat['exp_t_s2n'][ng]
    data['sens_1'][dng] = ngcat['exp_e_sens_1'][ng]
    data['sens_2'][dng] = ngcat['exp_e_sens_2'][ng]
    data['stamp_size'][dng] = ngcat['box_size'][ng]

    # Combine the flags we have so far:
    print 'flags range from %d to %d'%(
        np.min(ngcat['flags'][ngcat['flags']>0]),
        np.max(ngcat['flags']))
    print 'exp_flags range from %d to %d'%(
        np.min(ngcat['exp_flags'][ngcat['exp_flags']>0]),
        np.max(ngcat['exp_flags']))
    data['error_flag'] = 2**30
    data['error_flag'][dng] = ngcat['flags'][ng]
    data['error_flag'][dng] |= ngcat['exp_flags'][ng]

    # Calculate mean sensitivity
    data['sens_avg'][dng] = (ngcat['exp_e_sens_1'][ng] + ngcat['exp_e_sens_2'][ng]) / 2.

    # Calculate the recommended weight.
    data['w'][dng] = 1.0/(2.0*0.22*0.22 + ngcat['exp_e_cov_1_1'][ng] + ngcat['exp_e_cov_2_2'][ng])

    # Calculate log10(sb)
    data['log10_sb_i'][dng] = np.log10(np.abs(ngcat['exp_flux_i'][ng]/ngcat['exp_t'][ng]))

    # swap e1 signs
    for tag in ['e_1', 'e_cov_1_2', 'e_cov_2_1']:
        data[tag][dng] *= -1.0
    data['e_cov_2_1'][dng] = data['e_cov_1_2'][dng]

    # unblind
    for tag in ['e_1','e_2']:
        data[tag][dng] /= unblind.get_factor()

    # Bring in the round columns from the s2n catalog
    scat = fitsio.read(config['ngmix_s2n'])
    sc, dsc = numpy_util.match(scat['id'], info['COADD_OBJECTS_ID'])
    data['snr_r'][dsc] = scat['exp_s2n_r'][sc]
    data['t_r'][dsc] = scat['exp_T_r'][sc]
    data['snr_t_r'][dsc] = scat['exp_T_s2n_r'][sc]

    # Check that round_flags are consistent with the current error_flag
    print 'round_flags range from %d to %d'%(
        np.min(scat['round_flags'][scat['round_flags']>0]),
        np.max(scat['round_flags']))
    print 'exp_round_flags range from %d to %d'%(
        np.min(scat['exp_round_flags'][scat['exp_round_flags']>0]),
        np.max(scat['exp_round_flags'][scat['exp_round_flags']<2**30]))
    assert np.all(data['error_flag'][dsc][ scat['round_flags'][sc] > 0 ] > 0)
    assert np.all(data['error_flag'][dsc][ scat['exp_round_flags'][sc] == 2**30 ] > 0)

    # Combine the round flags into the error_flag column
    data['error_flag'][dsc] |= scat['round_flags'][sc]
    data['error_flag'][dsc] |= scat['exp_round_flags'][sc]

    # Bring in the mean psf information from the psf catalog
    pcat = fitsio.read(config['ngmix_psfs'])
    pc, dpc = numpy_util.match(pcat['id'], info['COADD_OBJECTS_ID'])
    data['mean_psf_e1'][dpc] = pcat['psfrec_e'][pc,0]
    data['mean_psf_e2'][dpc] = pcat['psfrec_e'][pc,1]
    data['mean_psf_t'][dpc] = pcat['psfrec_T'][pc]

    # swap e1 signs
    for tag in ['mean_psf_e1']:
        data[tag][dng] *= -1.0

    # Before writing out, make all the names upper case
    data.dtype.names = tuple([name.upper() for name in data.dtype.names])

    fitsio.write(config['release_ngmix'],data,clobber=True)
Example #6
    phi_best_rrlyr = []
    r0_best_rrlyr = []
    F_best_rrlyr = []


    table2 = np.genfromtxt('%s/table2.dat' % home_path, names='id, type, P, rA', usecols=(0, 1, 2, 11), dtype='u4, |S10, f8, f8')

    # load light curve parameters of SDSS S82 RR Lyrae stars
    true_params = np.genfromtxt('%s/table2.dat' % home_path, names='id, type, P, gA, g0, gE, gT, rA, r0, rE, rT, iA, i0, iE, iT, zA, z0, zE, zT', usecols=(0, 1, 2, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22), dtype='u4, |S10, f8, f8, f8, f8, u2, f8, f8, f8, u2, f8, f8, f8, u2, f8, f8, f8, u2')

    ab = true_params['type'] == 'ab'
    ab = true_params[ab]
    # sort RRab star-templates by phi_08
    phi_08 = np.genfromtxt('%s/objid_phi08_all.txt' % template_path, names='id, phi_08g, phi_08r, phi_08i, phi_08z', dtype='u4, f8, f8, f8, f8')
    # match two lists using id
    m1, m2 = match(ab['id'], phi_08['id'])
    assert (m1.size == ab.size)
    ab = ab[m1]
    phi_08 = phi_08[m2]
    # sort in ascending order, starting with g band
    sI = np.argsort(phi_08, order=('phi_08g', 'phi_08r', 'phi_08i', 'phi_08z'))
    ab = ab[sI]

    # select 2 RRc star-templates
    c = (true_params['id'] == 1342595) | (true_params['id'] == 3031571)
    c = true_params[c]

    # merge RRab and RRc star-templates (RRc need to go at the end otherwise the code will break)
    true_params = np.concatenate((ab, c))

    # load from table3 ra & dec
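Example #6 orders the matched RRab templates with a multi-key argsort over a structured array. A self-contained sketch of that numpy feature, using a made-up two-column table:

import numpy as np

# tiny stand-in for the phi_08 table read above
phi = np.array([(3, 0.7), (1, 0.2), (2, 0.2)],
               dtype=[('id', 'u4'), ('phi_08g', 'f8')])

# argsort on a structured array accepts an order tuple of field names;
# later fields break ties in the earlier ones
sI = np.argsort(phi, order=('phi_08g', 'id'))
assert list(phi[sI]['id']) == [1, 2, 3]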
Example #7
def make_im3shape(config, info):
    """Make the im3shape catalog file
    """

    # Setup the dtype:
    dt = np.dtype([('coadd_objects_id', 'i8'), ('e_1', 'f8'), ('e_2', 'f8'),
                   ('nbc_m', 'f8'), ('nbc_c1', 'f8'), ('nbc_c2', 'f8'),
                   ('w', 'f8'), ('error_flag', 'i4'), ('info_flag', 'i4'),
                   ('snr_w', 'f4'), ('snr_r', 'f4'), ('flux_r', 'f4'),
                   ('radius', 'f4'), ('is_bulge', 'i4'),
                   ('mean_rgpp_rp', 'f4'), ('mean_psf_e1', 'f8'),
                   ('mean_psf_e2', 'f8'), ('mean_psf_fwhm', 'f4'),
                   ('ra_shift', 'f4'), ('dec_shift', 'f4'), ('chi2', 'f4'),
                   ('likelihood', 'f4'), ('stamp_size', 'i4'),
                   ('n_exposure', 'i4')])

    # Read the source catalog
    imcat = fitsio.read(config['im3shape'])

    # im3shape objects are sorted, but some of them are not in the info catalog.
    im, dim = numpy_util.match(imcat['coadd_objects_id'],
                               info['COADD_OBJECTS_ID'])
    #assert np.all(imcat['coadd_objects_id'] == sorted(imcat['coadd_objects_id']))

    # Set reasonable defaults for float fields:
    data = np.zeros(len(info), dtype=dt)
    for col, t in dt.descr:
        if 'f' in t:
            data[col] = -9999

    # The coadd_objects_id column can match for all rows
    data['coadd_objects_id'] = info['COADD_OBJECTS_ID']

    # Check that the overlap with imcat is correct
    assert np.all(
        data['coadd_objects_id'][dim] == imcat['coadd_objects_id'][im])

    # Default error_flag is NO_ATTEMPT
    data['error_flag'] = 2**30
    data['info_flag'] = 2**25

    print 'unique error_flags in original catalog = ', np.unique(
        imcat['error_flag'])
    print 'unique error_flags that match info = ', np.unique(
        imcat['error_flag'][im])

    # Copy in the columns from the source catalog that keep the same name:
    for col in [
            'nbc_m', 'nbc_c1', 'nbc_c2', 'w', 'error_flag', 'info_flag',
            'radius', 'mean_rgpp_rp', 'mean_psf_fwhm', 'likelihood',
            'stamp_size', 'n_exposure'
    ]:
        data[col][dim] = imcat[col][im]

    print 'unique error_flags in final = ', np.unique(data['error_flag'][dim])

    # Some get a new name:
    data['e_1'][dim] = imcat['e1'][im]
    data['e_2'][dim] = imcat['e2'][im]
    data['snr_w'][dim] = imcat['snr'][im]
    data['snr_r'][dim] = imcat['round_snr'][im]
    data['mean_psf_e1'][dim] = imcat['mean_psf_e1_sky'][im]
    data['mean_psf_e2'][dim] = imcat['mean_psf_e2_sky'][im]
    data['ra_shift'][dim] = imcat['ra_as'][im]
    data['dec_shift'][dim] = imcat['dec_as'][im]
    data['chi2'][dim] = imcat['chi2_pixel'][im]

    # Do a calculation to get the flux from separate bulge/disc fluxes.
    data['is_bulge'][dim] = imcat['bulge_flux'][im] != 0.
    # Only one is non-zero:
    bulge = imcat['bulge_flux'][im] != 0.
    disc = imcat['disc_flux'][im] != 0.
    print 'sum isbulge = ', np.sum(data['is_bulge'][dim])
    print 'sum bulge_flux > 0. = ', np.sum(imcat['bulge_flux'][im] != 0.)
    print 'sum disc_flux > 0. = ', np.sum(imcat['disc_flux'][im] != 0.)
    assert np.all(
        (imcat['bulge_flux'][im] != 0) != (imcat['disc_flux'][im] != 0))
    data['flux_r'][dim] = imcat['mean_flux'][im] * (imcat['bulge_flux'][im] +
                                                    imcat['disc_flux'][im])

    # clip the weights
    data['w'][dim] = np.clip(data['w'][dim], 0.0, 0.24**(-2.0))

    # unblind
    for tag in ['e_1', 'e_2']:
        data[tag][dim] /= unblind.get_factor()

    # Before writing out, make all the names upper case
    data.dtype.names = tuple([name.upper() for name in data.dtype.names])

    fitsio.write(config['release_im3shape'], data, clobber=True)
Example #8
def make_ngmix(config, info):
    """Make the ngmix catalog file
    """

    # Setup the dtype:
    dt = np.dtype([('coadd_objects_id', 'i8'), ('e_1', 'f8'), ('e_2', 'f8'),
                   ('sens_avg', 'f8'), ('w', 'f8'), ('e_cov_1_1', 'f8'),
                   ('e_cov_1_2', 'f8'),
                   ('e_cov_2_1', 'f8'), ('e_cov_2_2', 'f8'),
                   ('error_flag', 'i4'), ('snr_w', 'f4'), ('snr_r', 'f4'),
                   ('flux_i', 'f4'), ('mag_i', 'f4'), ('t', 'f4'),
                   ('t_err', 'f4'), ('t_r', 'f4'), ('snr_t', 'f4'),
                   ('snr_t_r', 'f4'), ('log10_sb_i', 'f4'),
                   ('mean_psf_e1', 'f8'), ('mean_psf_e2', 'f8'),
                   ('mean_psf_t', 'f4'), ('sens_1', 'f4'), ('sens_2', 'f4'),
                   ('arate', 'f4'), ('stamp_size', 'i4'), ('mask_frac', 'f4')])

    # Read the source catalogs
    ngcat = fitsio.read(config['ngmix'])

    # ngmix catalogs are not sorted, so we need to use the match function.
    # Also, ngmix has some objects that are not in the info catalog.
    ng, dng = numpy_util.match(ngcat['coadd_objects_id'],
                               info['COADD_OBJECTS_ID'])

    # Set reasonable defaults for float fields:
    data = np.zeros(len(info), dtype=dt)
    for col, t in dt.descr:
        if 'f' in t:
            data[col] = -9999

    # The coadd_objects_id column can match for all rows
    data['coadd_objects_id'] = info['COADD_OBJECTS_ID']

    # Check that the overlap with ngcat is correct
    assert np.all(
        data['coadd_objects_id'][dng] == ngcat['coadd_objects_id'][ng])

    # Copy in the columns from the source catalog that keep the same name:
    for col in ['mask_frac']:
        data[col][dng] = ngcat[col][ng]

    # Some columns just need to drop the 'exp_' prefix:
    for col in [
            'e_1', 'e_2', 'flux_i', 'mag_i', 'arate', 't', 't_err',
            'e_cov_1_1', 'e_cov_1_2', 'e_cov_2_2'
    ]:
        data[col][dng] = ngcat['exp_' + col][ng]

    # Some need to be renamed:
    data['snr_w'][dng] = ngcat['exp_s2n_w'][ng]
    data['snr_t'][dng] = ngcat['exp_t_s2n'][ng]
    data['sens_1'][dng] = ngcat['exp_e_sens_1'][ng]
    data['sens_2'][dng] = ngcat['exp_e_sens_2'][ng]
    data['stamp_size'][dng] = ngcat['box_size'][ng]

    # Combine the flags we have so far:
    print 'flags range from %d to %d' % (np.min(
        ngcat['flags'][ngcat['flags'] > 0]), np.max(ngcat['flags']))
    print 'exp_flags range from %d to %d' % (np.min(
        ngcat['exp_flags'][ngcat['exp_flags'] > 0]), np.max(
            ngcat['exp_flags']))
    data['error_flag'] = 2**30
    data['error_flag'][dng] = ngcat['flags'][ng]
    data['error_flag'][dng] |= ngcat['exp_flags'][ng]

    # Calculate mean sensitivity
    data['sens_avg'][dng] = (ngcat['exp_e_sens_1'][ng] +
                             ngcat['exp_e_sens_2'][ng]) / 2.

    # Calculate the recommended weight.
    data['w'][dng] = 1.0 / (2.0 * 0.22 * 0.22 + ngcat['exp_e_cov_1_1'][ng] +
                            ngcat['exp_e_cov_2_2'][ng])

    # Calculate log10(sb)
    data['log10_sb_i'][dng] = np.log10(
        np.abs(ngcat['exp_flux_i'][ng] / ngcat['exp_t'][ng]))

    # swap e1 signs
    for tag in ['e_1', 'e_cov_1_2', 'e_cov_2_1']:
        data[tag][dng] *= -1.0
    data['e_cov_2_1'][dng] = data['e_cov_1_2'][dng]

    # unblind
    for tag in ['e_1', 'e_2']:
        data[tag][dng] /= unblind.get_factor()

    # Bring in the round columns from the s2n catalog
    scat = fitsio.read(config['ngmix_s2n'])
    sc, dsc = numpy_util.match(scat['id'], info['COADD_OBJECTS_ID'])
    data['snr_r'][dsc] = scat['exp_s2n_r'][sc]
    data['t_r'][dsc] = scat['exp_T_r'][sc]
    data['snr_t_r'][dsc] = scat['exp_T_s2n_r'][sc]

    # Check that round_flags are consistent with the current error_flag
    print 'round_flags range from %d to %d' % (np.min(
        scat['round_flags'][scat['round_flags'] > 0]),
                                               np.max(scat['round_flags']))
    print 'exp_round_flags range from %d to %d' % (
        np.min(scat['exp_round_flags'][scat['exp_round_flags'] > 0]),
        np.max(scat['exp_round_flags'][scat['exp_round_flags'] < 2**30]))
    assert np.all(data['error_flag'][dsc][scat['round_flags'][sc] > 0] > 0)
    assert np.all(
        data['error_flag'][dsc][scat['exp_round_flags'][sc] == 2**30] > 0)

    # Combine the round flags into the error_flag column
    data['error_flag'][dsc] |= scat['round_flags'][sc]
    data['error_flag'][dsc] |= scat['exp_round_flags'][sc]

    # Bring in the mean psf information from the psf catalog
    pcat = fitsio.read(config['ngmix_psfs'])
    pc, dpc = numpy_util.match(pcat['id'], info['COADD_OBJECTS_ID'])
    data['mean_psf_e1'][dpc] = pcat['psfrec_e'][pc, 0]
    data['mean_psf_e2'][dpc] = pcat['psfrec_e'][pc, 1]
    data['mean_psf_t'][dpc] = pcat['psfrec_T'][pc]

    # swap e1 signs
    for tag in ['mean_psf_e1']:
        data[tag][dng] *= -1.0

    # Before writing out, make all the names upper case
    data.dtype.names = tuple([name.upper() for name in data.dtype.names])

    fitsio.write(config['release_ngmix'], data, clobber=True)
Example #9
    # find objects within the searchRadius and whose noOfObs is at least 3
    angSepMask = sphdist(ra, dec, medRa, medDec) <= (searchRadius*60) #& (nObs>=3.0) -- not needed since this has been taken care of while calculating residuals and medians previously 
    global triand
    # separate these entire rows from the original testH5file
    # we do not need to check for galaxies now, since the parameters referred to have already been checked

    #searchFile = triand[angSepMask]
    objInRadius = objID[angSepMask]
    
    # collect the rows for each object within the search radius
    searchFile = []
    for o in objInRadius:
        searchFile.append(triand[triand.get_where_list('obj_id == %d' % o)])
    searchFile = np.concatenate(searchFile)

    m1, m2 = match(triand.col('obj_id'), objInRadius)
    m1, m2 = match_multi(triand.col('obj_id'), objInRadius)
    
    print triand[m1]
    print triand.col('obj_id')[m1]
    print objInRadius[m2]


    

    #putting unique object IDs in this file
    uniqueSearchFile = np.unique(searchFile['obj_id'])
    #file containing data for objects inside pixel
    objInPixelFile =searchFile[pixelIndexForObj == pixelNo]
    #i make a copy of the file, since i need the original values later
    objInPixelFile2 = searchFile[pixelIndexForObj == pixelNo]
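Example #9 pulls the rows for each object with one PyTables query per id. When the id column is already in memory (as it is for the match call via triand.col('obj_id')), a vectorised membership test does the same selection; the sketch below is an alternative illustration with invented values, not what the original script does:

import numpy as np

# stand-ins for the arrays in the snippet: one row per epoch in the table,
# and the ids that fell inside the search radius (values invented)
all_obj_ids = np.array([11, 11, 12, 13, 13, 13, 14])
objInRadius = np.array([11, 13])

# boolean membership test instead of one PyTables query per object
inRadius = np.isin(all_obj_ids, objInRadius)   # np.in1d on older numpy
rows = np.where(inRadius)[0]
assert np.all(np.isin(all_obj_ids[rows], objInRadius))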
Example #10
File: collate.py Project: esheldon/espy
    def get_matched_struct(self, pipe):
        dt=[
            ('simid','i4'),
            ('ccd','i2'),
            ('id','i4'),
            ('flags','i4'),
            ('row','f8'),
            ('col','f8'),
            ('model','S4'),
            ('s2n_w','f8'),
            ('Ts2n','f8'),
            ('s2','f8'),
            ('sratio','f8'),
            ('objtype','S4'),
            ('g','f8',2),
            ('gcov','f8',(2,2)),
            ('gsens','f8',2),
            ('Tmean','f8'),
            ('weight','f8'),
            ('use1','i2'),
            ('use2','i2'),
            ('use3','i2'),
            ('use4','i2')]

        # note this s2n min is just the limit used in the
        # shear measurement code
        wgal=pipe.get_gals(s2n_min=self.conf['shear_s2n_min'])

        gals=pipe.cat[wgal]
        output=zeros(gals.size, dtype=dt)

        print wgal.size, pipe.shear_res.size
        w,=where(gals['simid'] != pipe.shear_res['simid'])
        if w.size != 0:
            raise ValueError("gals and shear don't line up")

        mcat,mshear=match(gals['simid'], pipe.shear_res['simid'])
        if mshear.size != pipe.shear_res.size:
            mess="""not all shear objects matched
                by simid: %d %d""" % (pipe.shear_res.size,mshear.size)
            print mess
         
        # gets simid, row, col
        copy_fields(gals, output)

        output['ccd'] = pipe['ccd']

        output['flags'][:] = pipe.shear_res['flags'][:]

        output['s2n_w'][:] = pipe.shear_res['s2n_w'][:]
        output['Ts2n'][:] = pipe.shear_res['Ts2n'][:]
        output['s2'][:] = pipe.shear_res['s2'][:]
        output['sratio'][:] = sqrt(1./output['s2'][:])

        for i in xrange(output.size):
            output['objtype'][i] = pipe.shear_res['model'][i]

        output['g'][:,:] = pipe.shear_res['g'][:,:]
        output['gcov'][:,:,:] = pipe.shear_res['gcov'][:,:,:]
        output['gsens'][:,:] = pipe.shear_res['gsens'][:,:]

        output['Tmean'][:] = pipe.shear_res['Tmean'][:]
        output['model'][:] = pipe.shear_res['model'][:]


        wts=stats.get_weights(pipe.shear_res['gcov'][:,:,:])
        output['weight'][:] = wts[:]


        return output
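Example #10 uses copy_fields to move the columns that gals and output have in common (the "# gets simid, row, col" comment). A hand-rolled sketch of that copy-by-matching-name idea, assuming it copies the intersection of the two dtypes:

import numpy as np

def copy_common_fields(src, dst):
    """Copy every field present in both structured arrays (sketch only)."""
    for name in src.dtype.names:
        if name in dst.dtype.names:
            dst[name] = src[name]

gals   = np.zeros(3, dtype=[('simid', 'i4'), ('row', 'f8'), ('extra', 'f4')])
output = np.zeros(3, dtype=[('simid', 'i4'), ('row', 'f8'), ('col', 'f8')])
copy_common_fields(gals, output)
assert np.all(output['simid'] == gals['simid'])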