Example #1
def verify_info(config):
    """Verify that the info file we made is consistent with what Matt's script builds.
    """
    print('Verifying info catalog...')
    rel = fitsio.read(config['release_info'])
    print('Full catalog has %d rows' % len(rel))
    v18 = fitsio.read(os.path.join('/Users/Mike/Astro/des/SV/v18', config['flatcats_info']))
    print('v18 catalog has %d rows' % len(v18))
    q = np.where(rel['SVA1_FLAG'] <= 3)[0]
    print('mask has %d rows' % len(q))
    assert len(q) == len(v18)

    for col in ['COADD_OBJECTS_ID', 'RA', 'DEC',
                'MAG_AUTO_G', 'MAG_AUTO_R', 'MAG_AUTO_I', 'MAG_AUTO_Z',
                'PHOTOZ_BIN', 'MEAN_PHOTOZ']:
        print('Test rel[%r] == v18[%r]' % (col, col.lower()))
        assert np.all(rel[col][q] == v18[col.lower()])

    # v18 used 4 for im3shape, ngmix flags rather than 1.
    v18['im3shape_flags'] //= 4
    v18['ngmix_flags'] //= 4

    for col1, col2 in [ ('IM3SHAPE_FLAG', 'im3shape_flags'),
                        ('NGMIX_FLAG', 'ngmix_flags'),
                        ('SVA1_FLAG', 'sva1_gold_flags') ]:
        print('Test rel[%r] == v18[%r]' % (col1, col2))
        assert np.all(rel[col1][q] == v18[col2])

    print('info file passed verification tests')
    return q
Example #2
File: data.py Project: dfm/ketu
    def __init__(self, fn, time0, gp=True, skip=0, invert=False):
        self.gp = gp

        data, hdr = fitsio.read(fn, header=True)
        aps = fitsio.read(fn, 2)

        self.texp = (hdr["INT_TIME"] * hdr["NUM_FRM"]) / 86400.0

        # Choose the photometry with the smallest variance.
        var = aps["cdpp6"]
        var[var < 0.0] = np.inf
        i = np.argmin(var)

        # Load the data.
        self.skip = int(skip)
        self.time = data["time"] - time0
        self.flux = data["flux"][:, i]
        if invert:
            mu = np.median(self.flux[np.isfinite(self.flux)])
            self.flux = 2 * mu - self.flux
        self.quality = np.array(data["quality"], dtype=int)
        q = data["quality"]
        q = (q == 0) | (q == 16384)
        self.m = (np.isfinite(self.time) &
                  np.isfinite(self.flux) &
                  (np.arange(len(self.time)) > int(skip)) &
                  q)
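The `invert` branch above reflects the light curve about its median, turning dips into peaks while leaving the median itself unchanged. A quick standalone check of that identity (made-up flux values, not ketu's data):

import numpy as np

flux = np.array([1.0, 0.9, 1.1, np.nan])
mu = np.median(flux[np.isfinite(flux)])
flipped = 2 * mu - flux                        # reflect about the median
assert np.isclose(np.nanmedian(flipped), mu)   # the median is preserved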
Example #3
def read_means(s2n_min, args):
    fname = get_fname(s2n_min, args)

    print('reading:',fname)
    means = fitsio.read(fname)
    means_nocorr = fitsio.read(fname, ext='nocorr')
    return means, means_nocorr
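`fitsio.read` can address HDUs by EXTNAME as well as by number, which is what the `ext='nocorr'` read relies on. A minimal sketch of writing and then reading such a two-extension file (the file name and dtype here are placeholders):

import numpy as np
import fitsio

means = np.zeros(3, dtype=[('g1', 'f8'), ('g2', 'f8')])
fitsio.write('means.fits', means, clobber=True)      # first table HDU
fitsio.write('means.fits', means, extname='nocorr')  # appended, named HDU

m = fitsio.read('means.fits')                        # first table HDU
m_nocorr = fitsio.read('means.fits', ext='nocorr')   # addressed by EXTNAME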
Example #4
def add_depth_tag(decals, brick, outdir, overwrite=False):
    outfn = os.path.join(outdir, 'tractor', brick[:3], 'tractor-%s.fits' % brick)
    if os.path.exists(outfn) and not overwrite:
        print('Exists:', outfn)
        return
    fn = decals.find_file('tractor', brick=brick)
    if not os.path.exists(fn):
        print('Does not exist:', fn)
        return
    T = fits_table(fn, lower=False)
    primhdr = fitsio.read_header(fn)
    hdr = fitsio.read_header(fn, ext=1)
    print('Read', len(T), 'from', fn)
    T.decam_depth    = np.zeros((len(T), len(decals.allbands)), np.float32)
    T.decam_galdepth = np.zeros((len(T), len(decals.allbands)), np.float32)
    bands = 'grz'
    ibands = [decals.index_of_band(b) for b in bands]
    ix = np.clip(np.round(T.bx).astype(int), 0, 3599)
    iy = np.clip(np.round(T.by).astype(int), 0, 3599)
    for iband,band in zip(ibands, bands):
        fn = decals.find_file('depth', brick=brick, band=band)
        if os.path.exists(fn):
            print('Reading', fn)
            img = fitsio.read(fn)
            T.decam_depth[:,iband] = img[iy, ix]

        fn = decals.find_file('galdepth', brick=brick, band=band)
        if os.path.exists(fn):
            print('Reading', fn)
            img = fitsio.read(fn)
            T.decam_galdepth[:,iband] = img[iy, ix]
    outfn = os.path.join(outdir, 'tractor', brick[:3], 'tractor-%s.fits' % brick)
    trymakedirs(outfn, dir=True)

    for s in [
        'Data product of the DECam Legacy Survey (DECaLS)',
        'Full documentation at http://legacysurvey.org',
        ]:
        primhdr.add_record(dict(name='COMMENT', value=s, comment=s))

    # print 'Header:', hdr
    # T.writeto(outfn, header=hdr, primheader=primhdr)

    # Yuck, all this to get the units right
    tmpfn = outfn + '.tmp'
    fits = fitsio.FITS(tmpfn, 'rw', clobber=True)
    fits.write(None, header=primhdr)
    cols = T.get_columns()
    units = []
    for i in range(1, len(cols)+1):
        u = hdr.get('TUNIT%i' % i, '')
        units.append(u)
    # decam_depth units
    fluxiv = '1/nanomaggy^2'
    units[-2] = fluxiv
    units[-1] = fluxiv
    fits.write([T.get(c) for c in cols], names=cols, header=hdr, units=units)
    fits.close()
    os.rename(tmpfn, outfn)
    print('Wrote', outfn)
Example #5
def chisq_fig(good, img, g_det, g_detiv, r_det, r_detiv, wcs):
    g_det1 = fitsio.read('1d/detmap-g.fits')
    g_detiv1 = fitsio.read('1d/detiv-g.fits')
    r_det1 = fitsio.read('1d/detmap-r.fits')
    r_detiv1 = fitsio.read('1d/detiv-r.fits')

    g_sn1 = g_det1 * np.sqrt(g_detiv1)
    r_sn1 = r_det1 * np.sqrt(r_detiv1)
    goodpix1 = np.logical_and(g_detiv1 > 0.5 * np.median(g_detiv1),
                              r_detiv1 > 0.5 * np.median(r_detiv1))

    g_sn = g_det * np.sqrt(g_detiv)
    r_sn = r_det * np.sqrt(r_detiv)

    red_sed = [1., 2.5]

    # Detect on the single image
    #c3x,c3y = detect_sources(np.hypot(g_sn1, r_sn1), 3.)
    c3x,c3y = detect_sources(np.hypot(g_sn1, r_sn1), 4.5)
    keep = goodpix1[c3y, c3x]
    c3x = c3x[keep]
    c3y = c3y[keep]

    # Compute the S/N required for g-only or r-only to trigger the
    # "red" SED detector
    dm=[np.array([[1,0]]), np.array([[0,1]])]
    div=[np.ones(2), np.ones(2)]
    sn = sedsn(dm, div, red_sed)
    sng = sn[0,0]
    snr = sn[0,1]

    plt.figure(figsize=(6,4))
    plt.subplots_adjust(right=0.95, top=0.98)

    from matplotlib.patches import Circle
    plt.clf()
    # Annotate points as "true" or "false" based on deeper data.
    real = (np.hypot(g_sn[c3y,c3x], r_sn[c3y,c3x]) >  10.)
    fake = np.logical_not(real)
    #plt.plot(g_sn1[c3y,c3x][fake], r_sn1[c3y,c3x][fake], '.', color='0.5', alpha=0.2, label='False Peaks')
    plt.plot(g_sn1[c3y,c3x][fake], r_sn1[c3y,c3x][fake], 'x', color='0.5', alpha=0.5, label='False Detections')
    plt.plot(g_sn1[c3y,c3x][real], r_sn1[c3y,c3x][real], '.', color='k', alpha=0.5, label='Real Detections')
    a = np.linspace(0, 2.*np.pi, 100)
    plt.plot(5.*np.sin(a), 5.*np.cos(a), 'b-', label='Chi-squared detection')
    # r
    plt.axhline(5., color='r', linestyle=':', label='r-band only detection')
    # red
    m=-sng/snr
    b=5./snr
    xx = np.array([-20,40])
    plt.plot(xx, b+m*xx, 'm-', mew=2, linestyle='--', label="``Red'' SED-matched detection")
    plt.legend(loc='upper right', framealpha=1.0)
    plt.axis('square')
    plt.axis([-10,40,-10,20])
    plt.xlabel('g band S/N')
    plt.ylabel('r band S/N')
    plt.axhline(0, color='k', alpha=0.25)
    plt.axvline(0, color='k', alpha=0.25)
    plt.savefig('sed-matched.pdf')
Example #6
    def test_redmagic_fitter(self):
        np.random.seed(12345)

        file_path = 'data_for_tests/redmagic_test'

        # Read in the red-sequence parametrization

        # Read in the input data for comparison (no afterburner)
        calstr = fitsio.read(os.path.join(file_path, 'rcal_str_preab.fit'), ext=1, lower=True)

        # Read in the input data for testing (no afterburner)
        calstr2 = fitsio.read(os.path.join(file_path, 'rcal_str2.fit'), ext=1, lower=True)

        # Make a zred structure for mstar...
        config = Configuration(os.path.join('data_for_tests', 'testconfig.yaml'))
        zredstr = RedSequenceColorPar(None, config=config)

        # Set up the fitter...
        #randomn = np.random.normal(size=calstr2['z'][0, :].size)
        # Old IDL code did not sample for the selection, I think this was wrong
        randomn = np.zeros(calstr2['z'][0, :].size)

        rmfitter = RedmagicParameterFitter(calstr['nodes'][0, :], calstr['corrnodes'][0, :],
                                           calstr2['z'][0, :], calstr2['z_err'][0, :],
                                           calstr2['chisq'][0, :], calstr2['mstar'][0, :],
                                           calstr2['zcal'][0, :], calstr2['zcal_e'][0, :],
                                           calstr2['refmag'][0, :], randomn,
                                           calstr2['zmax'][0, :],
                                           calstr['etamin'][0], calstr['n0'][0],
                                           calstr2['volume'][0, :], calstr2['zrange'][0, :],
                                           calstr2['zbinsize'][0],
                                           zredstr, maxchi=20.0,
                                           ab_use=calstr2['afterburner_use'][0, :])

        # These match the IDL values
        testing.assert_almost_equal(rmfitter(calstr['cmax'][0, :]), 1.9331937798956758)

        p0_cval = np.zeros(calstr['nodes'][0, :].size) + 2.0
        testing.assert_almost_equal(rmfitter(p0_cval), 317.4524284321642)

        cvals = rmfitter.fit(p0_cval)

        # This does not match the IDL output, because this run does a much
        # better job of minimizing the function, at least in this test.
        # I hope this is just because of the size of the testbed, which is
        # really way too small for something like this.
        testing.assert_almost_equal(cvals, np.array([2.61657263, 2.20376531, 1.00663991]))

        # Now we have to check the fitting with the afterburner

        biasvals = np.zeros(rmfitter._corrnodes.size)
        eratiovals = np.ones(rmfitter._corrnodes.size)
        biasvals, eratiovals = rmfitter.fit_bias_eratio(cvals, biasvals, eratiovals)

        cvals = rmfitter.fit(cvals, biaspars=biasvals, eratiopars=eratiovals, afterburner=True)

        testing.assert_almost_equal(cvals, np.array([3.39002141, 1.74421087, 0.93541002]))
        testing.assert_almost_equal(biasvals, np.array([0.00896487, -0.02456343, 0.02006761]))
        testing.assert_almost_equal(eratiovals, np.array([1.49999937, 1.01673233, 0.65657318]))
Example #7
def read_all_data(config):
    default_columns = ['RA', 'DEC', 'Z', 'MAG_I']
    photometric_cat = fitsio.read(config['data_p'], 
                                  cols=default_columns)
    spectroscopic_cat = fitsio.read(config['data_s'],
                                    cols=default_columns)

    return photometric_cat, spectroscopic_cat
Example #8
File: k2.py Project: AstroVPK/kali
    def _readK2SFF(self, name, campaign, path, processing):
        fileNameMAST = self._getCanonicalFileName(name, campaign, 'mast')
        fileNameMASTFits = ''.join([fileNameMAST[0:-3], 'fits'])
        filePathMASTFits = os.path.join(path, fileNameMASTFits)
        MASTInFile = fitsio.read(filePathMASTFits)
        self._numCadences = MASTInFile.shape[0]
        startT = -1.0
        lineNum = 0
        while startT == -1.0:
            startTCand = MASTInFile[lineNum][0]
            startTCandNext = MASTInFile[lineNum + 1][0]
            if not np.isnan(startTCand) and not np.isnan(startTCandNext):
                startT = float(startTCand)
                dt = float(startTCandNext) - float(startTCand)
            else:
                lineNum += 1
        self.startT = startT
        self._dt = dt  # Increment between epochs.
        self.cadence = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.t = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.x = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.y = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.yerr = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.mask = np.require(np.zeros(self.numCadences), requirements=[
                               'F', 'A', 'W', 'O', 'E'])  # Numpy array of mask values.
        for i in range(self.numCadences):
            dataLine = MASTInFile[i]
            self.cadence[i] = int(dataLine[2])
            self.yerr[i] = math.sqrt(sys.float_info[0])
            if dataLine[9] == 0:
                self.t[i] = float(dataLine[0]) - self.startT
            else:
                if not np.isnan(dataLine[0]):
                    self.t[i] = float(dataLine[0]) - self.startT
                else:
                    self.t[i] = self.t[i - 1] + self.dt

        fileName = self._getCanonicalFileName(name, campaign, 'k2sff')
        fileNameFits = ''.join([fileName[0:-3], 'fits'])
        filePathFits = os.path.join(path, fileNameFits)
        dataInFile = fitsio.read(filePathFits)
        for i in range(dataInFile.shape[0]):
            dataLine = dataInFile[i]
            cadNum = int(dataLine[5])
            index = np.where(self.cadence == cadNum)[0][0]
            self.y[index] = float(dataLine[2])
            self.mask[index] = 1.0

        valSum = 0.0
        countSum = 0.0
        for i in range(self.numCadences - 1):
            valSum += self.mask[i + 1]*self.mask[i]*math.pow((self.y[i + 1] - self.y[i]), 2.0)
            countSum += self.mask[i + 1]*self.mask[i]
        noise = math.sqrt(valSum/countSum)
        for i in range(self.numCadences):
            if self.mask[i] == 1.0:
                self.yerr[i] = noise
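The noise estimate at the end is the RMS of consecutive flux differences, restricted to pairs where both cadences are unmasked. A vectorized equivalent of that loop (a sketch on toy arrays, not the class's code):

import numpy as np

y = np.array([1.0, 1.1, 0.0, 0.9, 1.0])   # fluxes (toy values)
m = np.array([1.0, 1.0, 0.0, 1.0, 1.0])   # 1 = good cadence, 0 = masked

pair = m[1:] * m[:-1]                      # 1 only when both neighbors are good
noise = np.sqrt(np.sum(pair * (y[1:] - y[:-1])**2) / np.sum(pair))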
Example #9
def stage1(ps=None, T=None, detmap=None, detnoise=None,
           band=None, coadd_id=None, basefn=None, psfnorm=None, cowcs=None,
           psfsig=None, coadd=None, hot=None, sig1=None, **kwargs):

    T.pix = np.array([None] * len(T))
    T.ox = np.array([None] * len(T))
    T.oy = np.array([None] * len(T))
    T.sig1 = np.zeros(len(T))
    for i,wise in enumerate(T):
        intfn = get_l1b_file(wisedir, wise.scan_id, wise.frame_num, band)
        print('intfn:', intfn)
        maskfn = intfn.replace('-int-', '-msk-')
        if mask_gz:
            maskfn = maskfn + '.gz'
        print('maskfn', maskfn)

        fn = os.path.basename(intfn).replace('-int', '')
        comaskfn = os.path.join('%smask' % basefn,
                                'unwise-mask-' + coadd_id + '-' + fn + '.gz')
        print('comaskfn', comaskfn)

        if not (os.path.exists(intfn) and os.path.exists(maskfn)):
            print('file not found; skipping')
            continue
        
        #print 'Reading...'
        img = fitsio.read(intfn)
        mask = fitsio.read(maskfn)
        comask = fitsio.read(comaskfn)

        #print 'Filtering...'
        img -= wise.sky1
        img[mask > 0] = 0.
        img[comask > 0] = 0.
        zpscale = 1. / zeropointToScale(wise.zeropoint)
        img *= zpscale

        detmapi = gaussian_filter(img, psfsig, mode='constant')
        detmapi /= psfnorm**2
        #print 'Detmap range', img.min(), img.max()

        #print 'Resampling...'
        wcs = Tan(intfn)
        try:
            Yo,Xo,Yi,Xi,nil = resample_with_wcs(cowcs, wcs, [], None)
        except OverlapError:
            print('No overlap; skipping')
            continue
        T.pix[i] = detmapi[Yi,Xi]
        T.ox [i] = Xo
        T.oy [i] = Yo
        T.sig1[i] = np.sqrt(1./wise.weight)
        print('Saved', len(T.pix[i]), 'pixels')

    return dict(T=T)
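The `gaussian_filter(img, psfsig) / psfnorm**2` step is a matched filter: smooth by the Gaussian-approximated PSF, then divide by the PSF's squared L2 norm so each pixel reads as a flux estimate. A self-contained sketch, assuming (as is conventional) that psfnorm = sqrt(sum(psf**2)), which for a unit-flux Gaussian is 1/(2*sqrt(pi)*sigma):

import numpy as np
from scipy.ndimage import gaussian_filter

psfsig = 2.0                                     # PSF sigma in pixels (illustrative)
img = np.random.normal(size=(100, 100))          # noise-only toy image

psfnorm = 1.0 / (2.0 * np.sqrt(np.pi) * psfsig)  # sqrt(sum(psf**2)) for a unit-flux Gaussian
detmap = gaussian_filter(img, psfsig, mode='constant') / psfnorm**2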
Example #10
File: k2.py Project: AstroVPK/kali
    def _readK2VARCAT(self, name, campaign, path, processing):
        fileNameMAST = self._getCanonicalFileName(name, campaign, 'mast')
        fileNameMASTFits = ''.join([fileNameMAST[0:-3], 'fits'])
        filePathMASTFits = os.path.join(path, fileNameMASTFits)
        MASTInFile = fitsio.read(filePathMASTFits)
        self._numCadences = MASTInFile.shape[0]
        startT = -1.0
        lineNum = 0
        while startT == -1.0:
            startTCand = MASTInFile[lineNum][0]
            startTCandNext = MASTInFile[lineNum + 1][0]
            if not np.isnan(startTCand) and not np.isnan(startTCandNext):
                startT = float(startTCand)
                dt = float(startTCandNext) - float(startTCand)
            else:
                lineNum += 1
        self.startT = startT
        self._dt = dt  # Increment between epochs.
        self.cadence = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.t = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.x = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.y = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.yerr = np.require(np.zeros(self.numCadences), requirements=['F', 'A', 'W', 'O', 'E'])
        self.mask = np.require(np.zeros(self.numCadences), requirements=[
                               'F', 'A', 'W', 'O', 'E'])  # Numpy array of mask values.
        for i in range(self.numCadences):
            dataLine = MASTInFile[i]
            self.cadence[i] = int(dataLine[2])
            self.yerr[i] = math.sqrt(sys.float_info[0])
            if dataLine[9] == 0:
                self.t[i] = float(dataLine[0]) - self.startT
            else:
                if not np.isnan(dataLine[0]):
                    self.t[i] = float(dataLine[0]) - self.startT
                else:
                    self.t[i] = self.t[i - 1] + self.dt

        fileName = self._getCanonicalFileName(name, campaign, 'k2varcat')
        fileNameFits = ''.join([fileName[0:-3], 'fits'])
        filePathFits = os.path.join(path, fileNameFits)
        try:
            dataInFile = fitsio.read(filePathFits)
        except IOError as Err:
            pass
        else:
            for i in range(dataInFile.shape[0]):
                dataLine = dataInFile[i]
                time = float(dataLine[0]) - self.startT
                if not np.isnan(time):
                    index = np.where(self.t == time)[0][0]
                    self.y[index] = float(dataLine[3])
                    self.yerr[index] = float(dataLine[4])
                    self.mask[index] = 1.0
                else:
                    pass
Example #11
def test_pickle():
    """Test the reading a file written with python 2 pickling is readable with python 2 or 3.
    """
    if __name__ == '__main__':
        logger = piff.config.setup_logger(verbose=2)
    else:
        logger = piff.config.setup_logger(log_file='output/test_pickle.log')

    # First, this is the output file written by the above test_single function on python 2.
    # Should be trivially readable by python 2, but make sure it is also readable by python 3.
    psf = piff.read('input/test_single_py27.piff', logger=logger)

    wcs1 = galsim.TanWCS(
            galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024,1024)),
            galsim.CelestialCoord(-5 * galsim.arcmin, -25 * galsim.degrees)
            )
    wcs2 = galsim.TanWCS(
            galsim.AffineTransform(0.25, -0.02, 0.01, 0.24, galsim.PositionD(1024,1024)),
            galsim.CelestialCoord(5 * galsim.arcmin, -25 * galsim.degrees)
            )

    data1 = fitsio.read('input/test_single_cat1.fits')
    data2 = fitsio.read('input/test_single_cat2.fits')
    field_center = galsim.CelestialCoord(0 * galsim.degrees, -25 * galsim.degrees)

    for chipnum, data, wcs in [(1,data1,wcs1), (2,data2,wcs2)]:
        for k in range(len(data)):
            x = data['x'][k]
            y = data['y'][k]
            e1 = data['e1'][k]
            e2 = data['e2'][k]
            s = data['s'][k]
            #print('k,x,y = ',k,x,y)
            #print('  true s,e1,e2 = ',s,e1,e2)
            image_pos = galsim.PositionD(x,y)
            star = piff.Star.makeTarget(x=x, y=y, wcs=wcs, stamp_size=48, pointing=field_center,
                                        chipnum=chipnum)
            star = psf.drawStar(star)
            #print('  fitted s,e1,e2 = ',star.fit.params)
            np.testing.assert_almost_equal(star.fit.params, [s,e1,e2], decimal=6)


    # This is a DES Y3 PSF file that Matt Becker reported hadn't been readable with python 3.
    # The problem was it had been written with python 2's pickle, which isn't directly
    # compatible with python 3.  The code has been fixed to make it readable.  This unit
    # test is just to ensure that it remains so.
    # However, it only works if pixmappy is installed, so if not, just bail out.
    try:
        import pixmappy
    except ImportError:
        return
    fname = os.path.join('input', 'D00240560_r_c01_r2362p01_piff.fits')
    psf = piff.PSF.read(fname, logger=logger)
    image = psf.draw(x=103.3, y=592.0)
Example #12
    def __init__(self, galaxy, scale, band):
        self.topdir = os.path.join(os.getenv('HIZEA_DATA'),'hst','aplus','cutouts')
        self.imfile = os.path.join(self.topdir,'{}-{}-{}.fits'.format(galaxy,scale,band))
        self.ivarfile = os.path.join(self.topdir,'{}-{}-{}-ivar.fits'.format(galaxy,scale,band))
        self.psffile = os.path.join(self.topdir,'{}-{}-{}.psf'.format(galaxy,scale,band))
        
        print('Reading image {}'.format(self.imfile))
        self.image = fitsio.read(self.imfile, ext=0)
        self.header = fitsio.read_header(self.imfile)

        print('Reading image {}'.format(self.ivarfile))
        self.ivar = fitsio.read(self.ivarfile, ext=0)
Example #13
    def loadLabels(self,filename=None):
        if filename is None: filename = self.labelfile
        data = fitsio.read(filename)
        distances = fitsio.read(filename,ext='DISTANCE_MODULUS')['DISTANCE_MODULUS']
        if not (self.pixels == data['PIXEL']).all(): 
            raise Exception("Pixels do not match")
        if not (self.distances == distances).all():
            raise Exception("Distance moduli do not match.")

        self.labels = data['LABEL'].astype(int)
        self.nlabels = self.labels.max()
        if self.nlabels != (len(np.unique(self.labels)) - 1):
            raise Exception("Incorrect number of labels found.")
        return self.labels, self.nlabels
Example #14
def verify_ngmix(config, q, ng):
    """Verify that the ngmix file we made is consistent with what Matt's script builds.
    """
    print('Verifying ngmix catalog...')
    rel = fitsio.read(config['release_ngmix'])
    print('Full catalog has %d rows' % len(rel))
    v18 = fitsio.read(os.path.join('/Users/Mike/Astro/des/SV/v18', config['flatcats_ngmix']))
    print('v18 catalog has %d rows' % len(v18))
    print('mask has %d rows' % len(q))
    assert len(q) == len(v18)

    relq = rel[q]
    for col1, col2 in [ ('COADD_OBJECTS_ID', 'coadd_objects_id'),
                        ('E_1', 'exp_e_1'), 
                        ('E_2', 'exp_e_2'),
                        ('SENS_AVG', 'exp_e_sens_avg'),
                        ('W', 'exp_w'),
                        ('E_COV_1_1', 'exp_e_cov_1_1'),
                        ('E_COV_1_2', 'exp_e_cov_1_2'),
                        ('E_COV_2_1', 'exp_e_cov_2_1'),
                        ('E_COV_2_2', 'exp_e_cov_2_2'),
                        ('MEAN_PSF_E1', 'psfrec_e_1'),
                        ('MEAN_PSF_E2', 'psfrec_e_2'),
                        ('STAMP_SIZE', 'box_size') ]:
        print('Test rel[%r] == v18[%r]' % (col1, col2))
        assert np.all(relq[col1] == v18[col2])

    for col1, col2 in [ ('SNR_W', 'exp_s2n_w'),
                        ('SNR_R', 'exp_s2n_r'),
                        ('FLUX_I', 'exp_flux_i'),
                        ('MAG_I', 'exp_mag_i'),
                        ('T', 'exp_T'),
                        ('T_ERR', 'exp_T_err'),
                        ('T_R', 'exp_T_r'),
                        ('SNR_T', 'exp_T_s2n'),
                        ('SNR_T_R', 'exp_T_s2n_r'),
                        ('LOG10_SB_I', 'exp_log10sb_i'),
                        ('MEAN_PSF_T', 'psfrec_T'),
                        ('SENS_1', 'exp_e_sens_1'),
                        ('SENS_2', 'exp_e_sens_2'),
                        ('ARATE', 'exp_arate'),
                        ('MASK_FRAC', 'mask_frac') ]:
        print('Test rel[%r] ~= v18[%r]' % (col1, col2))
        assert np.all(np.abs(relq[col1] - v18[col2]) <= 1.e-7 * np.abs(v18[col2]))

    assert np.all(relq['ERROR_FLAG'] == (v18['flags'] | v18['exp_flags']))
    
    print('ngmix file passed verification tests')
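The elementwise tolerance test used here (and in the im3shape verifier below) is a pure relative comparison; it is the same criterion as `np.allclose` with `rtol=1e-7` and `atol=0`:

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([1.0, 2.0 + 1e-9])
assert np.all(np.abs(a - b) <= 1.e-7 * np.abs(b))
assert np.allclose(a, b, rtol=1.e-7, atol=0.0)   # equivalent check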
Example #15
def verify_im3shape(config, q, im):
    """Verify that the im3shape file we made is consistent with what Matt's script builds.
    """
    print('Verifying im3shape catalog...')
    rel = fitsio.read(config['release_im3shape'])
    print('Full catalog has %d rows' % len(rel))
    v18 = fitsio.read(os.path.join('/Users/Mike/Astro/des/SV/v18', config['flatcats_im3shape']))
    print('v18 catalog has %d rows' % len(v18))
    print('mask has %d rows' % len(q))
    assert len(q) == len(v18)

    relq = rel[q][im]
    v18 = v18[im]
    for col1, col2 in [ ('COADD_OBJECTS_ID', 'coadd_objects_id'),
                        ('E_1', 'e1'), 
                        ('E_2', 'e2'),
                        ('NBC_M', 'nbc_m'),
                        ('NBC_C1', 'nbc_c1'),
                        ('NBC_C2', 'nbc_c2'),
                        ('W', 'w'),
                        ('ERROR_FLAG', 'error_flag'),
                        ('INFO_FLAG', 'info_flag'),
                        ('MEAN_PSF_E1', 'mean_psf_e1_sky'),
                        ('MEAN_PSF_E2', 'mean_psf_e2_sky'),
                        ('STAMP_SIZE', 'stamp_size'),
                        ('N_EXPOSURE', 'n_exposure') ]:
        print('Test rel[%r] == v18[%r]' % (col1, col2))
        assert np.all(relq[col1] == v18[col2])

    for col1, col2 in [ ('SNR_W', 'snr'),
                        ('SNR_R', 'round_snr'),
                        ('RADIUS', 'radius'),
                        ('MEAN_RGPP_RP', 'mean_rgpp_rp'),
                        ('MEAN_PSF_FWHM', 'mean_psf_fwhm'),
                        ('RA_SHIFT', 'ra_as'),
                        ('DEC_SHIFT', 'dec_as'),
                        ('CHI2', 'chi2_pixel'),
                        ('LIKELIHOOD', 'likelihood') ]:
        print('Test rel[%r] ~= v18[%r]' % (col1, col2))
        assert np.all(np.abs(relq[col1] - v18[col2]) <= 1.e-7 * np.abs(v18[col2]))

    assert np.all(relq['IS_BULGE'] == (v18['bulge_flux'] > 0.))
    assert np.all(relq['IS_BULGE'] == (v18['disc_flux'] == 0.))
    flux1 = relq['FLUX_R']
    flux2 = v18['mean_flux'] * (v18['bulge_flux'] + v18['disc_flux'])
    assert np.all(np.abs(flux1 - flux2) <= 1.e-7 * np.abs(flux2))
    
    print('im3shape file passed verification tests')
Example #16
    def do_file_sums(self, fname):
        """
        get sums for a single file
        """

        sums=self.get_sums_struct()

        print("processing:",fname)
        try:
            data=fitsio.read(fname) 
        except IOError as err:
            print(str(err))
            return None

        if 'shear_index' not in data.dtype.names:
            data=self._add_shear_index(data)
        else:
            w,=where(data['shear_index']==-1)
            if w.size > 0:
                data['shear_index'][w]=0

        data=self._preselect(data)

        sums=self.do_sums1(data)

        return sums
Example #17
def write_rgb():
    #g,r,z = [fitsio.read('detmap-%s.fits' % band) for band in 'grz']
    g,r,z = [fitsio.read('coadd-%s.fits' % band) for band in 'grz']

    plt.figure(figsize=(10,10))
    plt.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95)

    plt.clf()
    for (im1,cc),scale in zip([(g,'b'),(r,'g'),(z,'r')],
                             [2.0, 1.2, 0.4]):
        im = im1 * scale
        im = im[im != 0]
        plt.hist(im.ravel(), histtype='step', color=cc,
                 range=[np.percentile(im, p) for p in (1,98)], bins=50)
    ps.savefig()
        
    #rgb = get_rgb_image(g,r,z, alpha=0.8, m=0.02)
    #rgb = get_rgb_image(g,r,z, alpha=16., m=0.005, m2=0.002,
    #rgb = get_rgb_image(g,r,z, alpha=32., m=0.01, m2=0.002,
    rgb = get_rgb_image(g,r,z, alpha=8., m=0.0, m2=0.0,
        scale_g = 2.,
        scale_r = 1.1,
        scale_z = 0.5,
        Q = 10)


    #for im in g,r,z:
    #    mn,mx = [np.percentile(im, p) for p in [20,99]]
    #    print 'mn,mx:', mn,mx
    
    plt.clf()
    plt.imshow(rgb, interpolation='nearest', origin='lower')
    ps.savefig()

    fitsio.write('rgb.fits', rgb)
Example #18
 def _consumeRead(self, path, cmd, header=None):
     #  /home/data/wincharis/H2RG-C17206-ASIC-104/UpTheRamp/20160712210126/H2RG_R01_M01_N01.fits
     dirName, fileName = os.path.split(path)
     cmd.diag('text="checking %s"' % (fileName))
     match = re.match(r'^H2RG_R0*(\d+)_M0*(\d+)_N0*(\d+)\.fits', fileName)
     if match is None:
         cmd.warn("failed to split up filename: %s" % (fileName))
         return
     rampN, groupN, readN = [int(m) for m in match.group(1,2,3)]
     cmd.diag('text="new read %d %d %d"' % (rampN, groupN, readN))
     if readN == 1:
         if header is not None:
             cmd.diag('text="getting header"')
             subaruHdr = header
         else:
             subaruHdr = pyfits.Header()
         cards = [dict(name='IDLPATH', value=dirName)]
         for c in subaruHdr.cards:
             cards.append(dict(name=c.keyword, value=c.value, comment=c.comment))
         phdu = fitsio.FITSHDR(cards)
         fitsio.write(self.outfile, None, header=phdu, clobber=True)
         cmd.diag('text="new file %s"' % (self.outfile))
         
     inData, inHdr = fitsio.read(path, header=True)
     stackFile = fitsio.FITS(self.outfile, mode='rw')
     stackFile.write(inData, header=inHdr)
     stackFile[-1].write_checksum()
     stackFile.close()
     cmd.inform('readN=%d,%d,%d,%s' % (rampN,groupN,readN,self.outfile))
Example #19
def load_data():

    import glob
    columns_list = [ config['columns'][name] for name in config['columns'].keys() ]
    filelist_results = glob.glob('%s/DES*.gz'%config['results_dir'])
    list_data = []
    logger.info('found %d files' % len(filelist_results))
    for file_results in filelist_results[args.first:(args.first+args.num)]:

        res=fitsio.read(file_results)
        warnings.warn('%s'%str(res.dtype.names))
        res=res[columns_list]
        res.dtype.names=config['columns'].keys()

        if config['method'] == 'im3shape':
            warnings.warn('using 1 as m1 and m2')
            res['m1'] = 1
            res['m2'] = 1

        res_cut = apply_cuts(res)
        logger.info('%s size=%d/%d' % (file_results,len(res_cut),len(res)) )
        list_data.append(res_cut)

    res = np.concatenate(list_data)
    logger.info('total data size=%d' % (len(res)))

    return res  
Example #20
    def readFITSMappable(self, mappable, fieldmap):
        """
        For the file held in mappable, read in the fields defined in
        fieldmap. File must be in FITS format.
        """

        mapunit = {}
        ft      = mappable.dtype
        fname   = mappable.name

        fields = []
        for val in fieldmap[ft].values():
            if hasattr(val, '__iter__'):
                fields.extend(val)
            else:
                fields.extend([val])

        data = fitsio.read(fname, columns=fields)
        for mapkey in fieldmap[ft].keys():
            mapunit[mapkey] = data[fieldmap[ft][mapkey]]
            if hasattr(fieldmap[ft][mapkey], '__iter__'):
                dt = mapunit[mapkey].dtype[0]
                ne = len(mapunit[mapkey])
                nf = len(fieldmap[ft][mapkey])
                mapunit[mapkey] = mapunit[mapkey].view(dt).reshape((ne,nf))

        return mapunit
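The final `view(dt).reshape((ne, nf))` turns a record array whose fields all share one dtype into a plain 2D array: one row per record, one column per field. A small standalone illustration (synthetic dtype, not the project's field map):

import numpy as np

rec = np.zeros(4, dtype=[('MAG_G', 'f8'), ('MAG_R', 'f8'), ('MAG_I', 'f8')])
ne, nf = len(rec), len(rec.dtype.names)

mat = rec.view('f8').reshape((ne, nf))   # works because every field is 'f8'
print(mat.shape)                         # (4, 3)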
Example #21
def read_map2(filename,field=0,dtype=np.float64,nest=False,hdu=1,h=False,verbose=True,memmap=False):
    hdr=fitsio.read_header(filename,ext=hdu)
    
    fullsky = False
    try:
        if (hdr['OBJECT'].strip() == 'PARTIAL') :
            # partial sky format
            fullsky=False
        else:
            fullsky=True
    except:
        # if no OBJECT in header, assume full sky
        fullsky=True

    if fullsky:
        m=hp.read_map(filename,field=field,dtype=dtype,nest=nest,hdu=hdu,h=h,verbose=verbose,memmap=memmap)
    else:
        # partial sky
        st=fitsio.read(filename,ext=1)
        nside=hdr['NSIDE']

        m=np.zeros(12*nside*nside,dtype=dtype) + hp.UNSEEN

        if ((hdr['ORDERING'].strip() == 'NESTED') and (not nest)) :
            # change from nest to ring...
            m[hp.nest2ring(nside,st['PIXEL'])] = st['SIGNAL']
        elif ((hdr['ORDERING'].strip() == 'RING') and (nest)):
            # change from ring to nest...
            m[hp.ring2nest(nside,st['PIXEL'])] = st['SIGNAL']
        else :
            # straight up
            m[st['PIXEL']] = st['SIGNAL']

    return m
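In the partial-sky branch the file stores explicit (PIXEL, SIGNAL) pairs, so rebuilding the full map is a scatter into an UNSEEN-filled array. A minimal standalone version of the "straight up" case (toy pixel list; orderings assumed to match):

import numpy as np
import healpy as hp

nside = 64
pix = np.array([10, 11, 500])      # stand-in for st['PIXEL']
sig = np.array([1.0, 2.0, 3.0])    # stand-in for st['SIGNAL']

m = np.full(hp.nside2npix(nside), hp.UNSEEN)
m[pix] = sig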
Example #22
    def _load_data(self):
        import fitsio
        import desdb
        print("loading catalogs")

        df=desdb.DESFiles()

        alldata=[]

        cols=['flags','alphawin_j2000','deltawin_j2000','spread_model','mag_auto']

        nr=len(self.run_data)
        for i,rdata in enumerate(self.run_data):

            coadd_run=rdata['run']
            tilename=rdata['tilename']

            dd = {'coadd_run':coadd_run,
                  'tilename':tilename,
                  'band':self.select_band}

            f=df.url(type='coadd_cat',
                     coadd_run=coadd_run,
                     tilename=tilename,
                     band=self.select_band)

            print("    %d/%d  %s" % (i+1,nr,f))
            cat=fitsio.read(f,columns=cols,lower=True)

            dd['data'] = cat

            alldata.append(dd)
        
        self.alldata=alldata
Example #23
def mkgalmapY3ac(res,zr,gz='.gz',md='',fore='',wm='',syscut=''):
	gl = []
	for i in range(0,12*res*res):
		gl.append(0)
	#f = fitsio.read(dir+'dr1_lss_red_'+zr+'_v0_redux.fits.gz',ext=1)
	f = fitsio.read(dir+'test'+zr+mask+'.fits'+gz,ext=1)
	ngt = 0
	w = 1.
	zem = 0
	fw = ''
	if fore == 'fore':
		fw = '_fore'
	#if fore == 'auto':
	#	fw = '_auto'
	if md == 'nodepth':
		md = '_none'
	#else:
	#	md = '_'+md	
	for i in range(0,len(f)):
		ra,dec = f[i]['RA'],f[i]['DEC']
		
		#if f[i]['v0'+md+fw] == 1.:
		
		#if wm != '':
		#	w = float(ln[4])
		#if z > zmin and z < zmax:
		th,phi = radec2thphi(ra,dec)
		p = hp.ang2pix(res,th,phi,nest=True)
		gl[p] += w
		ngt += w
	print(len(gl), ngt)
	return gl
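`radec2thphi` is not defined in this snippet; healpy's `ang2pix` expects colatitude theta and longitude phi in radians, so a plausible implementation (an assumption, matching the conversion written out explicitly in Example #31 below) is:

import numpy as np

def radec2thphi(ra, dec):
    # Assumed helper: theta is colatitude, phi is longitude, both in radians.
    theta = np.radians(90.0 - dec)
    phi = np.radians(ra)
    return theta, phi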
Example #24
def old_read_ccd_data(expcat, work, expnum):

    all_data = []
    ccdnums = []
    for k in range(len(expcat)):
        ccdnum = expcat[k]['ccdnum']
        if expcat[k]['flag'] != 0:
            #print('Skipping ccd %d because it is blacklisted: '%ccdnum, expcat[k]['flag'])
            continue

        cat_file = os.path.join(work, str(expnum), "psf_cat_%d_%d.fits"%(expnum,ccdnum))
        #print('cat_file = ',cat_file)
        try:
            data = fitsio.read(cat_file)
        except (OSError, IOError):
            print('Unable to open cat_file %s.  Skipping this file.'%cat_file)
            continue
        all_data.append(data)
        ccdnums.extend([ccdnum] * len(data))

    #print('all_data = ',all_data)
    all_data = np.concatenate(all_data)
    ccdnums = np.array(ccdnums, dtype=int)
    #print('all_data => ',all_data)
    #print('ccdnums = ',ccdnums)
    return all_data, ccdnums
Example #25
def read_prior(**keys):
    """
    parameters
    ----------
    run: string, keyword
        String representing the run, e.g. sfit-noisefree-c01
    partype: string, keyword
        Something extra to identify this
    ext: string, keyword, optional
        Extension for file, default 'fits' 
    """
    import fitsio
    from ngmix.gmix import GMixND

    fname=get_prior_file(**keys)
    print("reading:",fname)
    data = fitsio.read(fname)

    weights=data['weights']
    means=data['means']
    covars=data['covars']

    if len(means.shape) == 1:
        means = means.reshape( (means.size, 1) )

    prior = GMixND(weights,
                   means,
                   covars)
    return prior
Example #26
def aspcapStar(loc_id,apogee_id,ext=1,dr=None,header=True,
               aspcapWavegrid=False):
    """
    NAME:
       aspcapStar
    PURPOSE:
       Read an aspcapStar file for a given star
    INPUT:
       loc_id - location ID (field for 1m targets)
       apogee_id - APOGEE ID of the star
       ext= (1) extension to load
       header= (True) if True, also return the header
       dr= return the path corresponding to this data release (general default)
       aspcapWavegrid= (False) if True, output the spectrum on the ASPCAP 
                       wavelength grid
    OUTPUT:
       aspcapStar file or (aspcapStar file, header)
    HISTORY:
       2014-11-25 - Written - Bovy (IAS)
    """
    filePath= path.aspcapStarPath(loc_id,apogee_id,dr=dr)
    if not os.path.exists(filePath):
        download.aspcapStar(loc_id,apogee_id,dr=dr)
    data= fitsio.read(filePath,ext,header=header)
    return data
Example #27
    def _load_psf_map(self, **kw):
        """
        we fake the coadd psf
        """
        extra_data=kw.get('extra_data',{})

        map_file=extra_data.get('psf_map',None)
        if map_file is None:
            raise RuntimeError("for Y3 you must send a map file")

        data=fitsio.read(map_file)
        psf_map={}
        for i in range(data.size):

            if i==0:
                ii=i+1
            else:
                ii=i

            fname=data['im_filename'][i].strip()
            psf_path = data['psf_local_path'][ii].strip()

            keep = fname.split('_')[0:0+3]
            key = '-'.join(keep )

            psf_map[key] = psf_path

        self._psf_map=psf_map
Example #28
def read_my_lackner_sersicn_fits(n_gauss):
    """
    read file holding my gaussian fits to lackner sersicn distribution
    """
    import fitsio
    fname=get_my_lackner_sersicn_fits(n_gauss)
    return fitsio.read(fname)
Example #29
def get_phoenix_spectra(teff,logg,z):
    ftemplate = 'lte{teff:05d}-{logg:4.2f}{z:+3.1f}.PHOENIX-ACES-AGSS-COND-2011-HiRes.fits'

    PHOENIX_DIR = spiderman.rcParams['PHOENIX_DIR']

    filename = os.path.join(PHOENIX_DIR,ftemplate.format(teff=teff,logg=logg,z=z))

    # changing to si, W / m^3 / str
    if use_fitsio:
        flux,h = fitsio.read(filename, ext=0, header=True)
    else:
        flux,h = fits.getdata(filename, ext=0, header=True)
    
    flux = flux*1e-7*1e6/(np.pi)

    crval = h['CRVAL1']
    cdelt = h['CDELT1']
    ctype = h['CTYPE1']

    if ctype == 'AWAV-LOG':
        wvl = (np.exp(crval + cdelt*np.arange(0,len(flux))))*1e-10
    else:
        print('ctype is not log! It is {}'.format(ctype))

    return wvl, flux
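For the 'AWAV-LOG' case the wavelength grid is reconstructed from the header's logarithmic dispersion: wvl_i = exp(CRVAL1 + CDELT1 * i) in Angstroms, with the trailing 1e-10 converting to metres. A compact standalone version with illustrative header values:

import numpy as np

crval, cdelt, nflux = 8.141, 1.0e-5, 5                   # illustrative AWAV-LOG header values
wvl = np.exp(crval + cdelt * np.arange(nflux)) * 1e-10   # wavelengths in metres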
Example #30
        def filter(brickname, filename, region):
            if not intersect(sweep, region): 
                return None, None
            try:
                objects = fitsio.read(filename, 1, upper=True)
                chunkheader = fitsio.read_header(filename, 0, upper=True)
            except:
                if ns.ignore_errors:
                    print('IO error on %s' % filename)
                    return None, None
                else:
                    raise
            mask = objects['BRICK_PRIMARY'] != 0
            objects = objects[mask]
            mask = objects['RA'] >= ra1
            mask &= objects['RA'] < ra2
            mask &= objects['DEC'] >= dec1
            mask &= objects['DEC'] < dec2
            objects = objects[mask]

            chunk = np.empty(len(objects), dtype=SWEEP_DTYPE)

            for colname in chunk.dtype.names:
                if colname not in objects.dtype.names:
                    # skip missing columns 
                    continue
                try:
                    chunk[colname][...] = objects[colname][...]
                except ValueError:
                    print('failed on column `%s`' % colname)
                    raise
            chunkheader = {key: chunkheader[key] for key in chunkheader.keys()}
            return chunk, chunkheader
Example #31
def make_bright_star_mask_in_hp(nside, pixnum, verbose=True, gaiaepoch=2015.5,
                                maglim=12., matchrad=1., maskepoch=2023.0):
    """Make a bright star mask in a HEALPixel using Tycho, Gaia and URAT.

    Parameters
    ----------
    nside : :class:`int`
        (NESTED) HEALPixel nside.
    pixnum : :class:`int`
        A single HEALPixel number.
    verbose : :class:`bool`
        If ``True`` then log informational messages.

    Returns
    -------
    :class:`recarray`
        The bright star mask in the form of `maskdatamodel.dtype`.

    Notes
    -----
        - Runs in a minute or so for a typical nside=4 pixel.
        - See :func:`~desitarget.brightmask.make_bright_star_mask` for
          descriptions of the output mask and the other input parameters.
    """
    # ADM start the clock.
    t0 = time()

    # ADM read in the Tycho files.
    tychofns = find_tycho_files_hp(nside, pixnum, neighbors=False)
    tychoobjs = []
    for fn in tychofns:
        tychoobjs.append(fitsio.read(fn, ext='TYCHOHPX'))
    tychoobjs = np.concatenate(tychoobjs)
    # ADM create the Tycho reference magnitude, which is VT then HP
    # ADM then BT in order of preference.
    tychomag = tychoobjs["MAG_VT"].copy()
    tychomag[tychomag == 0] = tychoobjs["MAG_HP"][tychomag == 0]
    tychomag[tychomag == 0] = tychoobjs["MAG_BT"][tychomag == 0]
    # ADM discard any Tycho objects below the input magnitude limit
    # ADM and outside of the HEALPixels of interest.
    theta, phi = np.radians(90-tychoobjs["DEC"]), np.radians(tychoobjs["RA"])
    tychohpx = hp.ang2pix(nside, theta, phi, nest=True)
    ii = (tychohpx == pixnum) & (tychomag < maglim)
    tychomag, tychoobjs = tychomag[ii], tychoobjs[ii]
    if verbose:
        log.info('Read {} (mag < {}) Tycho objects (pix={})...t={:.1f} mins'.
                 format(np.sum(ii), maglim, pixnum, (time()-t0)/60))

    # ADM read in the associated Gaia files. Also grab
    # ADM neighboring pixels to prevent edge effects.
    gaiafns = find_gaia_files(tychoobjs, neighbors=True)
    gaiaobjs = []
    cols = 'SOURCE_ID', 'RA', 'DEC', 'PHOT_G_MEAN_MAG', 'PMRA', 'PMDEC'
    for fn in gaiafns:
        if os.path.exists(fn):
            gaiaobjs.append(fitsio.read(fn, ext='GAIAHPX', columns=cols))

    gaiaobjs = np.concatenate(gaiaobjs)
    gaiaobjs = rfn.rename_fields(gaiaobjs, {"SOURCE_ID": "REF_ID"})
    # ADM limit Gaia objects to 3 magnitudes fainter than the passed
    # ADM limit. This leaves some (!) leeway when matching to Tycho.
    gaiaobjs = gaiaobjs[gaiaobjs['PHOT_G_MEAN_MAG'] < maglim + 3]
    if verbose:
        log.info('Read {} (G < {}) Gaia sources (pix={})...t={:.1f} mins'.format(
            len(gaiaobjs), maglim+3, pixnum, (time()-t0)/60))

    # ADM substitute URAT where Gaia proper motions don't exist.
    ii = ((np.isnan(gaiaobjs["PMRA"]) | (gaiaobjs["PMRA"] == 0)) &
          (np.isnan(gaiaobjs["PMDEC"]) | (gaiaobjs["PMDEC"] == 0)))
    if verbose:
        log.info('Add URAT for {} Gaia objs with no PMs (pix={})...t={:.1f} mins'
                 .format(np.sum(ii), pixnum, (time()-t0)/60))

    urat = add_urat_pms(gaiaobjs[ii], numproc=1)
    if verbose:
        log.info('Found an additional {} URAT objects (pix={})...t={:.1f} mins'
                 .format(np.sum(urat["URAT_ID"] != -1), pixnum, (time()-t0)/60))
    for col in "PMRA", "PMDEC":
        gaiaobjs[col][ii] = urat[col]
    # ADM need to track the URATID to track which objects have
    # ADM substituted proper motions.
    uratid = np.zeros_like(gaiaobjs["REF_ID"])-1
    uratid[ii] = urat["URAT_ID"]

    # ADM match to remove Tycho objects already in Gaia. Prefer the more
    # ADM accurate Gaia proper motions. Note, however, that Tycho epochs
    # ADM can differ from the mean (1991.5) by as much as 0.86 years,
    # ADM so a star with a proper motion as large as Barnard's Star
    # ADM (10.3 arcsec) can be off by a significant margin (~10").
    margin = 10.
    ra, dec = rewind_coords(gaiaobjs["RA"], gaiaobjs["DEC"],
                            gaiaobjs["PMRA"], gaiaobjs["PMDEC"],
                            epochnow=gaiaepoch)
    # ADM match Gaia to Tycho with a suitable margin.
    if verbose:
        log.info('Match Gaia to Tycho with margin={}" (pix={})...t={:.1f} mins'
                 .format(margin, pixnum, (time()-t0)/60))
    igaia, itycho = radec_match_to([ra, dec],
                                   [tychoobjs["RA"], tychoobjs["DEC"]],
                                   sep=margin, radec=True)
    if verbose:
        log.info('{} matches. Refining at 1" (pix={})...t={:.1f} mins'.format(
            len(itycho), pixnum, (time()-t0)/60))

    # ADM match Gaia to Tycho at the more exact reference epoch.
    epoch_ra = tychoobjs[itycho]["EPOCH_RA"]
    epoch_dec = tychoobjs[itycho]["EPOCH_DEC"]
    # ADM some of the Tycho epochs aren't populated.
    epoch_ra[epoch_ra == 0], epoch_dec[epoch_dec == 0] = 1991.5, 1991.5
    ra, dec = rewind_coords(gaiaobjs["RA"][igaia], gaiaobjs["DEC"][igaia],
                            gaiaobjs["PMRA"][igaia], gaiaobjs["PMDEC"][igaia],
                            epochnow=gaiaepoch,
                            epochpast=epoch_ra, epochpastdec=epoch_dec)
    # ADM catch the corner case where there are no initial matches.
    if ra.size > 0:
        _, refined = radec_match_to([ra, dec], [tychoobjs["RA"][itycho],
                                    tychoobjs["DEC"][itycho]], radec=True)
    else:
        refined = np.array([], dtype='int')
    # ADM retain Tycho objects that DON'T match Gaia.
    keep = np.ones(len(tychoobjs), dtype='bool')
    keep[itycho[refined]] = False
    tychokeep, tychomag = tychoobjs[keep], tychomag[keep]
    if verbose:
        log.info('Kept {} Tychos with no Gaia match (pix={})...t={:.1f} mins'
                 .format(len(tychokeep), pixnum, (time()-t0)/60))

    # ADM now we're done matching to Gaia, limit Gaia to the passed
    # ADM magnitude limit and to the HEALPixel boundary of interest.
    theta, phi = np.radians(90-gaiaobjs["DEC"]), np.radians(gaiaobjs["RA"])
    gaiahpx = hp.ang2pix(nside, theta, phi, nest=True)
    ii = (gaiahpx == pixnum) & (gaiaobjs['PHOT_G_MEAN_MAG'] < maglim)
    gaiakeep, uratid = gaiaobjs[ii], uratid[ii]
    if verbose:
        log.info('Mask also comprises {} Gaia sources (pix={})...t={:.1f} mins'
                 .format(len(gaiakeep), pixnum, (time()-t0)/60))

    # ADM move the coordinates forwards to the input mask epoch.
    epoch_ra, epoch_dec = tychokeep["EPOCH_RA"], tychokeep["EPOCH_DEC"]
    # ADM some of the Tycho epochs aren't populated.
    epoch_ra[epoch_ra == 0], epoch_dec[epoch_dec == 0] = 1991.5, 1991.5
    ra, dec = rewind_coords(
        tychokeep["RA"], tychokeep["DEC"], tychokeep["PM_RA"], tychokeep["PM_DEC"],
        epochnow=epoch_ra, epochnowdec=epoch_dec, epochpast=maskepoch)
    tychokeep["RA"], tychokeep["DEC"] = ra, dec
    ra, dec = rewind_coords(
        gaiakeep["RA"], gaiakeep["DEC"], gaiakeep["PMRA"], gaiakeep["PMDEC"],
        epochnow=gaiaepoch, epochpast=maskepoch)
    gaiakeep["RA"], gaiakeep["DEC"] = ra, dec

    # ADM finally, format according to the mask data model...
    gaiamask = np.zeros(len(gaiakeep), dtype=maskdatamodel.dtype)
    tychomask = np.zeros(len(tychokeep), dtype=maskdatamodel.dtype)
    for col in "RA", "DEC":
        gaiamask[col] = gaiakeep[col]
        gaiamask["PM"+col] = gaiakeep["PM"+col]
        tychomask[col] = tychokeep[col]
        tychomask["PM"+col] = tychokeep["PM_"+col]
    gaiamask["REF_ID"] = gaiakeep["REF_ID"]
    # ADM take care to rigorously convert to int64 for Tycho.
    tychomask["REF_ID"] = tychokeep["TYC1"].astype('int64')*int(1e6) + \
        tychokeep["TYC2"].astype('int64')*10 + tychokeep["TYC3"]
    gaiamask["REF_CAT"], tychomask["REF_CAT"] = 'G2', 'T2'
    gaiamask["REF_MAG"] = gaiakeep['PHOT_G_MEAN_MAG']
    tychomask["REF_MAG"] = tychomag
    gaiamask["URAT_ID"], tychomask["URAT_ID"] = uratid, -1
    gaiamask["TYPE"], tychomask["TYPE"] = 'PSF', 'PSF'
    mask = np.concatenate([gaiamask, tychomask])
    # ADM ...and add the mask radii.
    mask["IN_RADIUS"], mask["NEAR_RADIUS"] = radii(mask["REF_MAG"])

    if verbose:
        log.info("Done making mask...(pix={})...t={:.1f} mins".format(
            pixnum, (time()-t0)/60.))

    return mask
Example #32
def test_direct():
    # If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
    # This should exactly match the treecorr result if brute=True.

    ngal = 100
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) )
    w = rng.random_sample(ngal)
    kap = rng.normal(0,3, (ngal,) )

    cat = treecorr.Catalog(x=x, y=y, w=w, k=kap)

    min_sep = 1.
    bin_size = 0.2
    nrbins = 10
    nubins = 5
    nvbins = 5
    kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True)
    kkk.process(cat, num_threads=2)

    true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
    true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
    true_zeta = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
    for i in range(ngal):
        for j in range(i+1,ngal):
            for k in range(j+1,ngal):
                d12 = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2)
                d23 = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2)
                d31 = np.sqrt((x[k]-x[i])**2 + (y[k]-y[i])**2)

                d3, d2, d1 = sorted([d12, d23, d31])
                rindex = np.floor(np.log(d2/min_sep) / bin_size).astype(int)
                if rindex < 0 or rindex >= nrbins: continue

                if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
                elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
                elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
                elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
                elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
                elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
                else: assert False
                # Now use ii, jj, kk rather than i,j,k, to get the indices
                # that correspond to the points in the right order.

                u = d3/d2
                v = (d1-d2)/d3
                if (x[jj]-x[ii])*(y[kk]-y[ii]) < (x[kk]-x[ii])*(y[jj]-y[ii]):
                    v = -v

                uindex = np.floor(u / bin_size).astype(int)
                assert 0 <= uindex < nubins
                vindex = np.floor((v+1) / bin_size).astype(int)
                assert 0 <= vindex < 2*nvbins

                www = w[i] * w[j] * w[k]
                zeta = www * kap[i] * kap[j] * kap[k]

                true_ntri[rindex,uindex,vindex] += 1
                true_weight[rindex,uindex,vindex] += www
                true_zeta[rindex,uindex,vindex] += zeta

    pos = true_weight > 0
    true_zeta[pos] /= true_weight[pos]

    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr3 script works correctly.
    config = treecorr.config.read_config('configs/kkk_direct.yaml')
    cat.write(config['file_name'])
    treecorr.corr3(config)
    data = fitsio.read(config['kkk_file_name'])
    np.testing.assert_allclose(data['r_nom'], kkk.rnom.flatten())
    np.testing.assert_allclose(data['u_nom'], kkk.u.flatten())
    np.testing.assert_allclose(data['v_nom'], kkk.v.flatten())
    np.testing.assert_allclose(data['ntri'], kkk.ntri.flatten())
    np.testing.assert_allclose(data['weight'], kkk.weight.flatten())
    np.testing.assert_allclose(data['zeta'], kkk.zeta.flatten(), rtol=1.e-3)

    # Also check the "cross" calculation.  (Real cross doesn't work, but this should.)
    kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins, brute=True)
    kkk.process(cat, cat, cat, num_threads=2)
    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8)

    config['file_name2'] = config['file_name']
    config['file_name3'] = config['file_name']
    treecorr.corr3(config)
    data = fitsio.read(config['kkk_file_name'])
    np.testing.assert_allclose(data['r_nom'], kkk.rnom.flatten())
    np.testing.assert_allclose(data['u_nom'], kkk.u.flatten())
    np.testing.assert_allclose(data['v_nom'], kkk.v.flatten())
    np.testing.assert_allclose(data['ntri'], kkk.ntri.flatten())
    np.testing.assert_allclose(data['weight'], kkk.weight.flatten())
    np.testing.assert_allclose(data['zeta'], kkk.zeta.flatten(), rtol=1.e-3)

    # Repeat with binslop = 0
    # And don't do any top-level recursion so we actually test not going to the leaves.
    kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                  bin_slop=0, max_top=0)
    kkk.process(cat)
    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-5, atol=1.e-8)

    # Check a few basic operations with a KKKCorrelation object.
    do_pickle(kkk)

    kkk2 = kkk.copy()
    kkk2 += kkk
    np.testing.assert_allclose(kkk2.ntri, 2*kkk.ntri)
    np.testing.assert_allclose(kkk2.weight, 2*kkk.weight)
    np.testing.assert_allclose(kkk2.meand1, 2*kkk.meand1)
    np.testing.assert_allclose(kkk2.meand2, 2*kkk.meand2)
    np.testing.assert_allclose(kkk2.meand3, 2*kkk.meand3)
    np.testing.assert_allclose(kkk2.meanlogd1, 2*kkk.meanlogd1)
    np.testing.assert_allclose(kkk2.meanlogd2, 2*kkk.meanlogd2)
    np.testing.assert_allclose(kkk2.meanlogd3, 2*kkk.meanlogd3)
    np.testing.assert_allclose(kkk2.meanu, 2*kkk.meanu)
    np.testing.assert_allclose(kkk2.meanv, 2*kkk.meanv)
    np.testing.assert_allclose(kkk2.zeta, 2*kkk.zeta)

    kkk2.clear()
    kkk2 += kkk
    np.testing.assert_allclose(kkk2.ntri, kkk.ntri)
    np.testing.assert_allclose(kkk2.weight, kkk.weight)
    np.testing.assert_allclose(kkk2.meand1, kkk.meand1)
    np.testing.assert_allclose(kkk2.meand2, kkk.meand2)
    np.testing.assert_allclose(kkk2.meand3, kkk.meand3)
    np.testing.assert_allclose(kkk2.meanlogd1, kkk.meanlogd1)
    np.testing.assert_allclose(kkk2.meanlogd2, kkk.meanlogd2)
    np.testing.assert_allclose(kkk2.meanlogd3, kkk.meanlogd3)
    np.testing.assert_allclose(kkk2.meanu, kkk.meanu)
    np.testing.assert_allclose(kkk2.meanv, kkk.meanv)
    np.testing.assert_allclose(kkk2.zeta, kkk.zeta)

    ascii_name = 'output/kkk_ascii.txt'
    kkk.write(ascii_name, precision=16)
    kkk3 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins)
    kkk3.read(ascii_name)
    np.testing.assert_allclose(kkk3.ntri, kkk.ntri)
    np.testing.assert_allclose(kkk3.weight, kkk.weight)
    np.testing.assert_allclose(kkk3.meand1, kkk.meand1)
    np.testing.assert_allclose(kkk3.meand2, kkk.meand2)
    np.testing.assert_allclose(kkk3.meand3, kkk.meand3)
    np.testing.assert_allclose(kkk3.meanlogd1, kkk.meanlogd1)
    np.testing.assert_allclose(kkk3.meanlogd2, kkk.meanlogd2)
    np.testing.assert_allclose(kkk3.meanlogd3, kkk.meanlogd3)
    np.testing.assert_allclose(kkk3.meanu, kkk.meanu)
    np.testing.assert_allclose(kkk3.meanv, kkk.meanv)
    np.testing.assert_allclose(kkk3.zeta, kkk.zeta)

    fits_name = 'output/kkk_fits.fits'
    kkk.write(fits_name)
    kkk4 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins)
    kkk4.read(fits_name)
    np.testing.assert_allclose(kkk4.ntri, kkk.ntri)
    np.testing.assert_allclose(kkk4.weight, kkk.weight)
    np.testing.assert_allclose(kkk4.meand1, kkk.meand1)
    np.testing.assert_allclose(kkk4.meand2, kkk.meand2)
    np.testing.assert_allclose(kkk4.meand3, kkk.meand3)
    np.testing.assert_allclose(kkk4.meanlogd1, kkk.meanlogd1)
    np.testing.assert_allclose(kkk4.meanlogd2, kkk.meanlogd2)
    np.testing.assert_allclose(kkk4.meanlogd3, kkk.meanlogd3)
    np.testing.assert_allclose(kkk4.meanu, kkk.meanu)
    np.testing.assert_allclose(kkk4.meanv, kkk.meanv)
    np.testing.assert_allclose(kkk4.zeta, kkk.zeta)

    with assert_raises(TypeError):
        kkk2 += config
    kkk5 = treecorr.KKKCorrelation(min_sep=min_sep/2, bin_size=bin_size, nbins=nrbins)
    with assert_raises(ValueError):
        kkk2 += kkk5
    kkk6 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size/2, nbins=nrbins)
    with assert_raises(ValueError):
        kkk2 += kkk6
    kkk7 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins*2)
    with assert_raises(ValueError):
        kkk2 += kkk7
    kkk8 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   min_u=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk8
    kkk9 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                   max_u=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk9
    kkk10 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                    nubins=nrbins*2)
    with assert_raises(ValueError):
        kkk2 += kkk10
    kkk11 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                    min_v=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk11
    kkk12 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                    max_v=0.1)
    with assert_raises(ValueError):
        kkk2 += kkk12
    kkk13 = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                    nvbins=nrbins*2)
    with assert_raises(ValueError):
        kkk2 += kkk13

    # Currently not implemented to only have cat2 or cat3
    with assert_raises(NotImplementedError):
        kkk.process(cat, cat2=cat)
    with assert_raises(NotImplementedError):
        kkk.process(cat, cat3=cat)
    with assert_raises(NotImplementedError):
        kkk.process_cross21(cat, cat)
Example #33
0
    def __init__(self, galf, ranf, sysmapf):
        self.galm = hp.read_map(galf, verbose=False)
        self.ranm = hp.read_map(ranf, verbose=False)
        self.sysm = ft.read(sysmapf, lower=True)
        self.cols = self.sysm.dtype.names
        print('attributes : {}'.format(self.cols))
Example #34
0
def gaia_gfas_from_sweep(filename, maglim=18.):
    """Create a set of GFAs for one sweep file.

    Parameters
    ----------
    filename: :class:`str`
        A string corresponding to the full path to a sweep file name.
    maglim : :class:`float`, optional, defaults to 18
        Magnitude limit for GFAs in Gaia G-band.

    Returns
    -------
    :class:`~numpy.ndarray`
        GFA objects from Gaia, formatted according to `desitarget.gfa.gfadatamodel`.
    """
    # ADM read in the objects.
    objects = fitsio.read(filename)

    # ADM As a mild speed up, only consider sweeps objects brighter than 3 mags
    # ADM fainter than the passed Gaia magnitude limit. Note that Gaia G-band
    # ADM approximates SDSS r-band.
    ii = ((objects["FLUX_G"] > 10**((22.5 - (maglim + 3)) / 2.5)) |
          (objects["FLUX_R"] > 10**((22.5 - (maglim + 3)) / 2.5)) |
          (objects["FLUX_Z"] > 10**((22.5 - (maglim + 3)) / 2.5)))
    objects = objects[ii]
    nobjs = len(objects)

    # ADM only retain objects with Gaia matches.
    # ADM It's fine to propagate an empty array if there are no matches
    # ADM The sweeps use 0 for objects with no REF_ID.
    objects = objects[objects["REF_ID"] > 0]

    # ADM determine a TARGETID for any objects on a brick.
    targetid = encode_targetid(objid=objects['OBJID'],
                               brickid=objects['BRICKID'],
                               release=objects['RELEASE'])

    # ADM format everything according to the data model.
    gfas = np.zeros(len(objects), dtype=gfadatamodel.dtype)
    # ADM make sure all columns initially have "ridiculous" numbers.
    gfas[...] = -99.
    gfas["REF_CAT"] = ""
    gfas["REF_EPOCH"] = 2015.5
    # ADM remove the TARGETID, BRICK_OBJID, REF_CAT, REF_EPOCH columns
    # ADM and populate them later as they require special treatment.
    cols = list(gfadatamodel.dtype.names)
    for col in [
            "TARGETID", "BRICK_OBJID", "REF_CAT", "REF_EPOCH", "URAT_ID",
            "URAT_SEP"
    ]:
        cols.remove(col)
    for col in cols:
        gfas[col] = objects[col]
    # ADM populate the TARGETID column.
    gfas["TARGETID"] = targetid
    # ADM populate the BRICK_OBJID column.
    gfas["BRICK_OBJID"] = objects["OBJID"]
    # ADM REF_CAT and REF_EPOCH didn't exist before DR8.
    for refcol in ["REF_CAT", "REF_EPOCH"]:
        if refcol in objects.dtype.names:
            gfas[refcol] = objects[refcol]

    # ADM cut the GFAs by a hard limit on magnitude.
    ii = gfas['GAIA_PHOT_G_MEAN_MAG'] < maglim
    gfas = gfas[ii]

    # ADM remove any sources based on LSLGA (retain Tycho/T2 sources).
    # ADM the try/except/decode catches both bytes and unicode strings.
    try:
        ii = np.array([rc.decode()[0] == "L" for rc in gfas["REF_CAT"]])
    except AttributeError:
        ii = np.array([rc[0] == "L" for rc in gfas["REF_CAT"]])
    gfas = gfas[~ii]

    return gfas
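
For reference, a quick worked check of the flux pre-selection above: sweep fluxes are in nanomaggies, where mag = 22.5 - 2.5*log10(flux), so the maglim + 3 cut translates directly into a flux threshold (the numbers below are just the default maglim=18 case):

# Worked check of the flux threshold used in gaia_gfas_from_sweep.
# Sweep fluxes are in nanomaggies: mag = 22.5 - 2.5*log10(flux).
maglim = 18.
flux_limit = 10**((22.5 - (maglim + 3)) / 2.5)
print(flux_limit)  # ~3.98 nanomaggies, i.e. everything brighter than mag 21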
Example #35
0
def inittab(qsocatpath, outtab):
    '''
    Populate table with information from QSO catalogue,
    make empty tables for BAL information to be added later.

    Parameters
    ----------
    qsocatpath : fits file
        QSO catalogue to be read.
    outtab  : fits file
        where to write resulting catalogue to.

    Returns
    -------
    colnames : list
        card names relating to BALs
    
    '''

    #Open input catalogue fits file to get num objects in catalogue.
    cathdu = fits.open(qsocatpath, lazy_load_hdus=False)
    NROWS = len(cathdu[1].data)

    # Check if strings have been turned into character arrays, and if so read the catalog with fitsio instead
    # This is necessary for fuji and guadalupe
    if cathdu[1].data['SURVEY'].dtype == 'O':
        cathdu[1].data = fitsio.read(qsocatpath)

    # PCA Fit Coefficients and chisq result
    pca_array = np.zeros([NROWS, bc.NPCA], dtype=float)  # PCA Fit Coefficients
    col0 = fits.Column(name='PCA_COEFFS', format='5E', array=pca_array)
    pca_chi2 = np.zeros([NROWS], dtype=float)  # PCA reduced chi2
    col1 = fits.Column(name='PCA_CHI2', format='E', array=pca_chi2)

    # Collection of empty arrays to set up rest of the BAL catalog
    # PM Note: Might change to -1. to indicate empty field
    zfloat_col = np.zeros([NROWS], dtype=float)
    zint_col = np.zeros([NROWS], dtype=int)  # For the integer ('J') trough-count columns
    zneg_col = np.array([-99] * NROWS, dtype=float)  # For BAL_PROB
    zbyte_col = np.ones(NROWS, dtype=np.ubyte)  # For bit masking in BALMASK
    zfloat_bicol = np.zeros(
        [NROWS, bc.NBI],
        dtype=float)  # Second dimension is max number of BI troughs
    zfloat_aicol = np.zeros(
        [NROWS, bc.NAI],
        dtype=float)  # Second dimension is max number of AI troughs

    # This will come from CNN Classifier
    # All set to -99 to indicate that data from BALcats not been read yet
    # Will be set to -1 once updated
    col2 = fits.Column(name='BAL_PROB', format='E', array=zneg_col)

    # These quantities are from trough fit
    col3 = fits.Column(name='BI_CIV', format='E', array=zfloat_col)
    col4 = fits.Column(name='ERR_BI_CIV', format='E', array=zfloat_col)
    col5 = fits.Column(name='NCIV_2000', format='J', array=zint_col)
    col6 = fits.Column(name='VMIN_CIV_2000', format='5E', array=zfloat_bicol)
    col7 = fits.Column(name='VMAX_CIV_2000', format='5E', array=zfloat_bicol)
    col8 = fits.Column(name='POSMIN_CIV_2000', format='5E', array=zfloat_bicol)
    col9 = fits.Column(name='FMIN_CIV_2000', format='5E', array=zfloat_bicol)

    col10 = fits.Column(name='AI_CIV', format='E', array=zfloat_col)
    col11 = fits.Column(name='ERR_AI_CIV', format='E', array=zfloat_col)
    col12 = fits.Column(name='NCIV_450', format='J', array=zint_col)
    col13 = fits.Column(name='VMIN_CIV_450', format='17E', array=zfloat_aicol)
    col14 = fits.Column(name='VMAX_CIV_450', format='17E', array=zfloat_aicol)
    col15 = fits.Column(name='POSMIN_CIV_450',
                        format='17E',
                        array=zfloat_aicol)
    col16 = fits.Column(name='FMIN_CIV_450', format='17E', array=zfloat_aicol)

    col17 = fits.Column(name='BI_SIIV', format='E', array=zfloat_col)
    col18 = fits.Column(name='ERR_BI_SIIV', format='E', array=zfloat_col)
    col19 = fits.Column(name='NSIIV_2000', format='J', array=zint_col)
    col20 = fits.Column(name='VMIN_SIIV_2000', format='5E', array=zfloat_bicol)
    col21 = fits.Column(name='VMAX_SIIV_2000', format='5E', array=zfloat_bicol)
    col22 = fits.Column(name='POSMIN_SIIV_2000',
                        format='5E',
                        array=zfloat_bicol)
    col23 = fits.Column(name='FMIN_SIIV_2000', format='5E', array=zfloat_bicol)

    col24 = fits.Column(name='AI_SIIV', format='E', array=zfloat_col)
    col25 = fits.Column(name='ERR_AI_SIIV', format='E', array=zfloat_col)
    col26 = fits.Column(name='NSIIV_450', format='J', array=zint_col)
    col27 = fits.Column(name='VMIN_SIIV_450', format='17E', array=zfloat_aicol)
    col28 = fits.Column(name='VMAX_SIIV_450', format='17E', array=zfloat_aicol)
    col29 = fits.Column(name='POSMIN_SIIV_450',
                        format='17E',
                        array=zfloat_aicol)
    col30 = fits.Column(name='FMIN_SIIV_450', format='17E', array=zfloat_aicol)

    # Separate column, not populated in runbalfinder, that serves as a bitmask
    col31 = fits.Column(
        name='BALMASK', format='B',
        array=zbyte_col)  # default is '1' (not found in baltable)

    #Columns relating to BAL information from runbalfinder
    BALcols = fits.ColDefs([
        col0, col1, col2, col3, col4, col5, col6, col7, col8, col9, col10,
        col11, col12, col13, col14, col15, col16, col17, col18, col19, col20,
        col21, col22, col23, col24, col25, col26, col27, col28, col29, col30
    ])

    #Columns already contained in QSO catalogue
    catcols = cathdu[1].columns

    totcols = catcols + BALcols + col31

    #List of card names in BinHDU relating to BALs
    #Will be iterated through in initcreate.popqsotab()
    balcolnames = BALcols.info('name', False)['name']

    #Create a new BinHDU with all columns
    newbhdu = fits.BinTableHDU.from_columns(totcols)
    #Update BinHDU of cathdu with newbhdu
    cathdu[1] = newbhdu
    cathdu[1].header['EXTNAME'] = 'ZCATALOG'

    try:
        cathdu.writeto(outtab)
    except OSError:
        print("File ", outtab, " already exists.")
        return balcolnames

    print("Empty BAL columns added to input QSO catalogue at ",
          str(qsocatpath), " and written to ", str(outtab), ".")

    return balcolnames
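
A minimal usage sketch for inittab, with hypothetical file names (the real catalogue paths are workflow-specific):

# Hypothetical paths; inittab returns the list of BAL-related column names
# that still need to be populated from the balfinder outputs.
balcolnames = inittab('qso-cat.fits', 'qso-cat-bal.fits')
print(balcolnames)  # e.g. ['PCA_COEFFS', 'PCA_CHI2', 'BAL_PROB', ...]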
Example #36
0
def test_direct():
    # If the catalogs are small enough, we can do a direct calculation to see if it comes out right.
    # This should exactly match the treecorr result if bin_slop=0.

    ngal = 200
    s = 10.
    np.random.seed(8675309)
    x1 = np.random.normal(0,s, (ngal,) )
    y1 = np.random.normal(0,s, (ngal,) )
    w1 = np.random.random(ngal)
    k1 = np.random.normal(5,1, (ngal,) )

    x2 = np.random.normal(0,s, (ngal,) )
    y2 = np.random.normal(0,s, (ngal,) )
    w2 = np.random.random(ngal)
    g12 = np.random.normal(0,0.2, (ngal,) )
    g22 = np.random.normal(0,0.2, (ngal,) )

    cat1 = treecorr.Catalog(x=x1, y=y1, w=w1, k=k1)
    cat2 = treecorr.Catalog(x=x2, y=y2, w=w2, g1=g12, g2=g22)

    min_sep = 1.
    max_sep = 50.
    nbins = 50
    bin_size = np.log(max_sep/min_sep) / nbins
    kg = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0.)
    kg.process(cat1, cat2)

    true_npairs = np.zeros(nbins, dtype=int)
    true_weight = np.zeros(nbins, dtype=float)
    true_xi = np.zeros(nbins, dtype=complex)
    for i in range(ngal):
        # It's hard to do all the pairs at once with numpy operations (although maybe possible).
        # But we can at least do all the pairs for each entry in cat1 at once with arrays.
        rsq = (x1[i]-x2)**2 + (y1[i]-y2)**2
        r = np.sqrt(rsq)
        logr = np.log(r)
        expmialpha = ((x1[i]-x2) - 1j*(y1[i]-y2)) / r

        ww = w1[i] * w2
        xi = -ww * k1[i] * (g12 + 1j*g22) * expmialpha**2

        index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
        mask = (index >= 0) & (index < nbins)
        np.add.at(true_npairs, index[mask], 1)
        np.add.at(true_weight, index[mask], ww[mask])
        np.add.at(true_xi, index[mask], xi[mask])

    true_xi /= true_weight

    print('true_npairs = ',true_npairs)
    print('diff = ',kg.npairs - true_npairs)
    np.testing.assert_array_equal(kg.npairs, true_npairs)

    print('true_weight = ',true_weight)
    print('diff = ',kg.weight - true_weight)
    np.testing.assert_allclose(kg.weight, true_weight, rtol=1.e-5, atol=1.e-8)

    print('true_xi = ',true_xi)
    print('kg.xi = ',kg.xi)
    print('kg.xi_im = ',kg.xi_im)
    np.testing.assert_allclose(kg.xi, true_xi.real, rtol=1.e-4, atol=1.e-8)
    np.testing.assert_allclose(kg.xi_im, true_xi.imag, rtol=1.e-4, atol=1.e-8)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr2 script works correctly.
    config = treecorr.config.read_config('configs/kg_direct.yaml')
    cat1.write(config['file_name'])
    cat2.write(config['file_name2'])
    treecorr.corr2(config)
    data = fitsio.read(config['kg_file_name'])
    np.testing.assert_allclose(data['R_nom'], kg.rnom)
    np.testing.assert_allclose(data['npairs'], kg.npairs)
    np.testing.assert_allclose(data['weight'], kg.weight)
    np.testing.assert_allclose(data['kgamT'], kg.xi, rtol=1.e-3)
    np.testing.assert_allclose(data['kgamX'], kg.xi_im, rtol=1.e-3)

    # Repeat with bin_slop not precisely 0, since the code flow is different for bin_slop == 0.
    # And don't do any top-level recursion so we actually test not going to the leaves.
    kg = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=1.e-16,
                                max_top=0)
    kg.process(cat1, cat2)
    np.testing.assert_array_equal(kg.npairs, true_npairs)
    np.testing.assert_allclose(kg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kg.xi, true_xi.real, rtol=1.e-3, atol=1.e-3)
    np.testing.assert_allclose(kg.xi_im, true_xi.imag, rtol=1.e-3, atol=1.e-3)

    # Check a few basic operations with a KGCorrelation object.
    do_pickle(kg)

    kg2 = kg.copy()
    kg2 += kg
    np.testing.assert_allclose(kg2.npairs, 2*kg.npairs)
    np.testing.assert_allclose(kg2.weight, 2*kg.weight)
    np.testing.assert_allclose(kg2.meanr, 2*kg.meanr)
    np.testing.assert_allclose(kg2.meanlogr, 2*kg.meanlogr)
    np.testing.assert_allclose(kg2.xi, 2*kg.xi)
    np.testing.assert_allclose(kg2.xi_im, 2*kg.xi_im)

    kg2.clear()
    kg2 += kg
    np.testing.assert_allclose(kg2.npairs, kg.npairs)
    np.testing.assert_allclose(kg2.weight, kg.weight)
    np.testing.assert_allclose(kg2.meanr, kg.meanr)
    np.testing.assert_allclose(kg2.meanlogr, kg.meanlogr)
    np.testing.assert_allclose(kg2.xi, kg.xi)
    np.testing.assert_allclose(kg2.xi_im, kg.xi_im)

    ascii_name = 'output/kg_ascii.txt'
    kg.write(ascii_name, precision=16)
    kg3 = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    kg3.read(ascii_name)
    np.testing.assert_allclose(kg3.npairs, kg.npairs)
    np.testing.assert_allclose(kg3.weight, kg.weight)
    np.testing.assert_allclose(kg3.meanr, kg.meanr)
    np.testing.assert_allclose(kg3.meanlogr, kg.meanlogr)
    np.testing.assert_allclose(kg3.xi, kg.xi)
    np.testing.assert_allclose(kg3.xi_im, kg.xi_im)

    fits_name = 'output/kg_fits.fits'
    kg.write(fits_name)
    kg4 = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins)
    kg4.read(fits_name)
    np.testing.assert_allclose(kg4.npairs, kg.npairs)
    np.testing.assert_allclose(kg4.weight, kg.weight)
    np.testing.assert_allclose(kg4.meanr, kg.meanr)
    np.testing.assert_allclose(kg4.meanlogr, kg.meanlogr)
    np.testing.assert_allclose(kg4.xi, kg.xi)
    np.testing.assert_allclose(kg4.xi_im, kg.xi_im)
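
The brute-force check above leans on np.add.at, which (unlike a fancy-indexed +=) accumulates correctly when the same bin index appears more than once; a small isolated illustration:

# Isolated illustration of the np.add.at idiom used in the direct sums above.
import numpy as np

index = np.array([0, 1, 1, 2, 1])
weights = np.array([1., 2., 3., 4., 5.])

buffered = np.zeros(3)
buffered[index] += weights        # repeated index 1 keeps only the last add
print(buffered)                   # [1. 5. 4.]

unbuffered = np.zeros(3)
np.add.at(unbuffered, index, weights)  # all repeats accumulate
print(unbuffered)                 # [ 1. 10.  4.]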
Example #37
0
def test_direct_spherical():
    # Repeat in spherical coords

    ngal = 100
    s = 10.
    np.random.seed(8675309)
    x1 = np.random.normal(0,s, (ngal,) )
    y1 = np.random.normal(0,s, (ngal,) ) + 200  # Put everything at large y, so small angle on sky
    z1 = np.random.normal(0,s, (ngal,) )
    w1 = np.random.random(ngal)
    k1 = np.random.normal(5,1, (ngal,) )

    x2 = np.random.normal(0,s, (ngal,) )
    y2 = np.random.normal(0,s, (ngal,) ) + 200
    z2 = np.random.normal(0,s, (ngal,) )
    w2 = np.random.random(ngal)
    g12 = np.random.normal(0,0.2, (ngal,) )
    g22 = np.random.normal(0,0.2, (ngal,) )

    ra1, dec1 = coord.CelestialCoord.xyz_to_radec(x1,y1,z1)
    ra2, dec2 = coord.CelestialCoord.xyz_to_radec(x2,y2,z2)

    cat1 = treecorr.Catalog(ra=ra1, dec=dec1, ra_units='rad', dec_units='rad', w=w1, k=k1)
    cat2 = treecorr.Catalog(ra=ra2, dec=dec2, ra_units='rad', dec_units='rad', w=w2, g1=g12, g2=g22)

    min_sep = 1.
    max_sep = 10.
    nbins = 50
    bin_size = np.log(max_sep/min_sep) / nbins
    kg = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                sep_units='deg', bin_slop=0.)
    kg.process(cat1, cat2)

    r1 = np.sqrt(x1**2 + y1**2 + z1**2)
    r2 = np.sqrt(x2**2 + y2**2 + z2**2)
    x1 /= r1;  y1 /= r1;  z1 /= r1
    x2 /= r2;  y2 /= r2;  z2 /= r2

    north_pole = coord.CelestialCoord(0*coord.radians, 90*coord.degrees)

    true_npairs = np.zeros(nbins, dtype=int)
    true_weight = np.zeros(nbins, dtype=float)
    true_xi = np.zeros(nbins, dtype=complex)

    c1 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra1, dec1)]
    c2 = [coord.CelestialCoord(r*coord.radians, d*coord.radians) for (r,d) in zip(ra2, dec2)]
    for i in range(ngal):
        for j in range(ngal):
            rsq = (x1[i]-x2[j])**2 + (y1[i]-y2[j])**2 + (z1[i]-z2[j])**2
            r = np.sqrt(rsq)
            r *= coord.radians / coord.degrees
            logr = np.log(r)

            index = np.floor(np.log(r/min_sep) / bin_size).astype(int)
            if index < 0 or index >= nbins:
                continue

            # Rotate shears to coordinates where line connecting is horizontal.
            # Original orientation is where north is up.
            theta2 = 90*coord.degrees - c2[j].angleBetween(c1[i], north_pole)
            expm2theta2 = np.cos(2*theta2) - 1j * np.sin(2*theta2)

            g2 = g12[j] + 1j * g22[j]
            g2 *= expm2theta2

            ww = w1[i] * w2[j]
            xi = -ww * k1[i] * g2

            true_npairs[index] += 1
            true_weight[index] += ww
            true_xi[index] += xi

    true_xi /= true_weight

    print('true_npairs = ',true_npairs)
    print('diff = ',kg.npairs - true_npairs)
    np.testing.assert_array_equal(kg.npairs, true_npairs)

    print('true_weight = ',true_weight)
    print('diff = ',kg.weight - true_weight)
    np.testing.assert_allclose(kg.weight, true_weight, rtol=1.e-5, atol=1.e-8)

    print('true_xi = ',true_xi)
    print('kg.xi = ',kg.xi)
    np.testing.assert_allclose(kg.xi, true_xi.real, rtol=1.e-4, atol=1.e-8)
    np.testing.assert_allclose(kg.xi_im, true_xi.imag, rtol=1.e-4, atol=1.e-8)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr2 script works correctly.
    config = treecorr.config.read_config('configs/kg_direct_spherical.yaml')
    cat1.write(config['file_name'])
    cat2.write(config['file_name2'])
    treecorr.corr2(config)
    data = fitsio.read(config['kg_file_name'])
    np.testing.assert_allclose(data['R_nom'], kg.rnom)
    np.testing.assert_allclose(data['npairs'], kg.npairs)
    np.testing.assert_allclose(data['weight'], kg.weight)
    np.testing.assert_allclose(data['kgamT'], kg.xi, rtol=1.e-3)
    np.testing.assert_allclose(data['kgamX'], kg.xi_im, rtol=1.e-3)

    # Repeat with bin_slop not precisely 0, since the code flow is different for bin_slop == 0.
    # And don't do any top-level recursion so we actually test not going to the leaves.
    kg = treecorr.KGCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                sep_units='deg', bin_slop=1.e-16, max_top=0)
    kg.process(cat1, cat2)
    np.testing.assert_array_equal(kg.npairs, true_npairs)
    np.testing.assert_allclose(kg.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kg.xi, true_xi.real, rtol=1.e-3, atol=1.e-3)
    np.testing.assert_allclose(kg.xi_im, true_xi.imag, rtol=1.e-3, atol=1.e-3)
Example #38
0
    def run(self, log, terminal, cl_params, mapping_pipelines):

        log.doMessage(
            'INFO',
            '\n{t.underline}Start imaging.{t.normal}'.format(t=terminal))

        # ------------------------------------------------- identify imaging scripts

        MapStruct = namedtuple("MapStruct", "nchans, window, start, end")

        maps = {}
        for mp in mapping_pipelines:
            nchans = int(
                mp.mp_object.row_list.get(mp.start, mp.feed, mp.window,
                                          mp.pol)['NCHANS'])
            maps[MapStruct(nchans, mp.window, mp.start, mp.end)] = set()

        for mp in mapping_pipelines:
            nchans = int(
                mp.mp_object.row_list.get(mp.start, mp.feed, mp.window,
                                          mp.pol)['NCHANS'])
            maps[MapStruct(nchans, mp.window, mp.start, mp.end)].add(mp.feed)

        log.doMessage('DBG', 'maps', maps)

        for thismap in maps:

            log.doMessage(
                'INFO', 'Imaging window {win} '
                'for map scans {start}-{stop}'.format(win=thismap.window,
                                                      start=thismap.start,
                                                      stop=thismap.end))

            scanrange = str(thismap.start) + '_' + str(thismap.end)

            imfiles = glob.glob('*' + scanrange + '*window' +
                                str(thismap.window) + '*pol*' + '.fits')

            if not imfiles:
                # no files found
                log.doMessage('ERR', 'No calibrated files found.')
                continue

            # filter file list to only include those with a feed calibrated for use in this map
            feeds = map(str, sorted(maps[thismap]))

            ff = fitsio.FITS(imfiles[0])
            nchans = int([
                xxx['tdim'] for xxx in ff[1].get_info()['colinfo']
                if xxx['name'] == 'DATA'
            ][0][0])
            ff.close()
            if cl_params.channels:
                channels = str(cl_params.channels)
            elif nchans:
                chan_min = int(nchans * .02)  # start at 2% of nchan
                chan_max = int(nchans * .98)  # end at 98% of nchans
                channels = str(chan_min) + ':' + str(chan_max)

            infiles = ' '.join(imfiles)

            if cl_params.keeptempfiles:
                keeptempfiles = '1'
            else:
                keeptempfiles = '0'

            # get the source name and restfrequency from an input file
            tabledata = fitsio.read(imfiles[0])
            source = tabledata['OBJECT'][0].strip()
            restfreq = tabledata['RESTFREQ'][0]
            del tabledata

            freq = "_%.0f_MHz" % (restfreq * 1e-6)
            output_basename = source + '_' + scanrange + freq

            if cl_params.average <= 1:
                average = 1
            else:
                average = cl_params.average

            if cl_params.clobber:
                clobber = ' --clobber'
            else:
                clobber = ''

            self.grid(log, channels, str(average), output_basename,
                      str(cl_params.verbose), clobber, infiles)
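
As a quick worked example of the 2%-98% channel trim above, for a hypothetical 32768-channel window:

# Worked example of the default channel selection (trim 2% off each end).
nchans = 32768                 # hypothetical channel count
chan_min = int(nchans * .02)   # 655
chan_max = int(nchans * .98)   # 32112
channels = str(chan_min) + ':' + str(chan_max)
print(channels)                # '655:32112'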
Example #39
0
def mask_targets(targs, inmaskdir, nside=2, pixlist=None):
    """Add bits for if objects occupy masks, and SAFE (BADSKY) locations.

    Parameters
    ----------
    targs : :class:`str` or `~numpy.ndarray`
        An array of targets/skies etc. created by, e.g.,
        :func:`desitarget.cuts.select_targets()` OR the filename of a
        file that contains such a set of targets/skies, etc.
    inmaskdir : :class:`str`, optional
        An input bright star mask file or HEALPixel-split directory as
        made by :func:`desitarget.brightmask.make_bright_star_mask()`
    nside : :class:`int`, optional, defaults to 2
        The nside at which the targets were generated. If the mask is
        a HEALPixel-split directory, then this helps to perform more
        efficient masking as only the subset of masks that are in
        pixels containing `targs` at this `nside` will be considered
        (together with neighboring pixels to account for edge effects).
    pixlist : :class:`list` or `int`, optional
        A set of HEALPixels corresponding to the `targs`. Only the subset
        of masks in HEALPixels in `pixlist` at `nside` will be considered
        (together with neighboring pixels to account for edge effects).
        If ``None``, then the pixels touched by `targs` are derived
        from `targs` itself.

    Returns
    -------
    :class:`~numpy.ndarray`
        Input targets with the `DESI_TARGET` column updated to reflect
        the `BRIGHT_OBJECT` bits and SAFE (`BADSKY`) sky locations added
        around the perimeter of the mask.

    Notes
    -----
        - `Tech Note 2346`_ details SAFE (BADSKY) locations.
    """
    t0 = time()

    # ADM Check if targs is a file name or the structure itself.
    if isinstance(targs, str):
        if not os.path.exists(targs):
            raise ValueError("{} doesn't exist".format(targs))
        targs = fitsio.read(targs)

    # ADM determine which pixels are occupied by targets.
    if pixlist is None:
        theta, phi = np.radians(90-targs["DEC"]), np.radians(targs["RA"])
        pixlist = list(set(hp.ang2pix(nside, theta, phi, nest=True)))
    else:
        # ADM in case an integer was passed.
        pixlist = np.atleast_1d(pixlist)
    log.info("Masking using masks in {} at nside={} in HEALPixels={}".format(
        inmaskdir, nside, pixlist))
    pixlist = add_hp_neighbors(nside, pixlist)

    # ADM read in the (potentially HEALPixel-split) mask.
    sourcemask = io.read_targets_in_hp(inmaskdir, nside, pixlist)

    ntargs = len(targs)
    log.info('Total number of masks {}'.format(len(sourcemask)))
    log.info('Total number of targets {}...t={:.1f}s'.format(ntargs, time()-t0))

    # ADM update the bits depending on whether targets are in a mask.
    # ADM also grab masks that contain or are near a target.
    dt, mx = set_target_bits(targs, sourcemask, return_masks=True)
    targs["DESI_TARGET"] = dt
    inmasks, nearmasks = mx

    # ADM generate SAFE locations for masks that contain a target.
    safes = get_safe_targets(targs, sourcemask[inmasks])
    # ADM update the bits for the safe locations depending on whether
    # ADM they're in a mask.
    safes["DESI_TARGET"] = set_target_bits(safes, sourcemask)
    # ADM combine the targets and safe locations.
    done = np.concatenate([targs, safes])

    log.info('Generated {} SAFE (BADSKY) locations...t={:.1f}s'.format(
        len(done)-ntargs, time()-t0))

    # ADM remove any SAFE locations that are in bright masks (because they aren't really safe).
    ii = (((done["DESI_TARGET"] & desi_mask.BAD_SKY) == 0) |
          ((done["DESI_TARGET"] & desi_mask.IN_BRIGHT_OBJECT) == 0))
    done = done[ii]

    log.info("...of these, {} SAFE (BADSKY) locations aren't in masks...t={:.1f}s"
             .format(len(done)-ntargs, time()-t0))

    log.info('Finishing up...t={:.1f}s'.format(time()-t0))

    return done
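
The (RA, Dec) to HEALPixel conversion above follows the standard healpy convention, with colatitude theta = 90 - Dec; a minimal standalone sketch with toy coordinates:

# Minimal sketch of the RA/Dec -> nested HEALPixel conversion in mask_targets.
import numpy as np
import healpy as hp

nside = 2
ra = np.array([10.0, 150.0])    # toy values, degrees
dec = np.array([-30.0, 45.0])
theta, phi = np.radians(90 - dec), np.radians(ra)
pixlist = list(set(hp.ang2pix(nside, theta, phi, nest=True)))
print(pixlist)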
Example #40
0
def main():

    global plate
    global mjd
    global fiber
    global ra
    global dec
    global thing_id
    # ================= PARSER =================
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        '--cat',
        type=str,
        default=None,
        help='full path to the QUASAR catalogue FITS file, required')
    parser.add_argument(
        '--nobs',
        type=int,
        default=None,
        help=
        'with this option, the program will show only objects that were observed a specified number of times'
    )
    parser.add_argument('--id',
                        type=int,
                        default=None,
                        help='the unique identifier of the object')
    parser.add_argument('--ra',
                        type=float,
                        default=None,
                        help='target right ascension')
    parser.add_argument('--dec',
                        type=float,
                        default=None,
                        help='target declination')
    parser.add_argument('--rad',
                        type=float,
                        default=None,
                        help='search radius, must specify --ra and --dec')
    parser.add_argument('-v', action='store_true', help='verbose')
    parser.add_argument(
        '--index',
        type=int,
        default=None,
        nargs='+',
        help='position of the object in the catalog (row number)')
    parser.add_argument(
        '--multi',
        action='store_true',
        help='identify duplicate QSOs by comparing unique object identifiers')
    args = parser.parse_args()

    if args.cat:
        catalog_path = args.cat
        columns = ['PLATE', 'MJD', 'FIBERID', 'RA', 'DEC', 'THING_ID']
        catdata, cathead = fitsio.read(catalog_path,
                                       ext=1,
                                       header=True,
                                       columns=columns)
    else:
        parser.error(
            'Must specify the path to the QSO catalog using --cat option.')

    # CODE TO REDUCE THE spAll-DR12.fits file to contain only neccessary data
    #reduced_cat = fitsio.FITS('/Volumes/Transcend/Isotropy/spectra/spAll-DR12-reduced.fits','rw')
    #hdict = {'PLATE', 'MJD', 'FIBERID', 'RA', 'DEC','THING_ID'}
    #nrows = catdata.size
    #reduced_cat.write(catdata)
    #reduced_cat.close()
    #sys.exit()

    # ================= READ DATA FROM CATALOG =================
    s = catdata.size
    plate = np.zeros(s, dtype='int')
    mjd = np.zeros(s, dtype='int')
    fiber = np.zeros(s, dtype='int')
    ra = np.zeros(s, dtype='float')
    dec = np.zeros(s, dtype='float')
    thing_id = np.zeros(s, dtype='int')
    for i in xrange(s):
        plate[i] = catdata[i][0]
        mjd[i] = catdata[i][1]
        fiber[i] = catdata[i][2]
        ra[i] = catdata[i][3]
        dec[i] = catdata[i][4]
        thing_id[i] = catdata[i][5]
    # ================= INDEX tag =================
    if args.index:
        index = args.index
        print 'PRINTING ALL OBSERVATIONS ON THE OBJECT IN ROW', index
        for i in index:
            if args.v:
                print_info_verbose(i)
            else:
                print_info_short(i)
    # ================= MULTI tag =================
    if args.multi:
        # the search criterion is the unique 'thing_id' identifier
        # return the positions of objects with the same thing_id value
        uniq_src = np.unique(thing_id)
        s = uniq_src.size
        if s == thing_id.size:
            print 'All QSOs in the catalog are unique, quitting program'
            sys.exit()


#        index = np.zeros(np.size(thing_id),dtype=('i8,i6'))
        k = 0
        X = []
        for i in uniq_src:
            ind = np.nonzero(thing_id == i)[0]
            #index[count] = (i,np.nonzero(thing_id==i)[0])
            entry = (i, ind.size, ind)
            if args.nobs:
                if ind.size == args.nobs:
                    X.append(entry)
                    print k, X[k]
                    k = k + 1
            else:
                X.append(entry)
                print k, X[k]
                k = k + 1
    # ================= ID tag =================
    if args.id:
        pos = np.nonzero(thing_id == args.id)[0]
        print 'PRINTING DATA ON ALL OBSERVATIONS OF OBJECT ', args.id
        print '======================================================'
        for i in pos:
            if args.v:
                print_info_verbose(i)
            else:
                print_info_short(i)
    # ================= COORDINATES tag =================
    if (args.ra and args.dec and not args.rad):
        print 'Radius not given'
    elif (not args.ra and args.dec and args.rad):
        print 'RA not given'
    elif (args.ra and not args.dec and args.rad):
        print 'DEC not given'
    elif (args.ra and args.dec and args.rad):
        ra0 = args.ra
        dec0 = args.dec
        rad = args.rad

        coord_0 = SkyCoord(ra0, dec0, unit='deg')
        dist_list = []
        coord_array = SkyCoord(ra, dec, unit='deg')
        dist_array = coord_0.separation(coord_array)
        src_list = []
        for src in xrange(catdata.size):
            inside_radius = dist_array[src].is_within_bounds(
                0 * u.deg, rad * u.arcsec)
            if inside_radius:
                src_list.append(src)
                print src
        print src_list
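
Since coord_0.separation(coord_array) already returns the full array of angular distances, the per-source loop above can be collapsed into one vectorized comparison; a sketch with toy coordinates:

# Vectorized alternative to the per-source radius loop above.
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord

ra = np.array([10.0, 10.0005, 40.0])   # toy catalogue, degrees
dec = np.array([-5.0, -5.0, 20.0])
coord_0 = SkyCoord(10.0, -5.0, unit='deg')
coord_array = SkyCoord(ra, dec, unit='deg')
dist_array = coord_0.separation(coord_array)
src_list = np.where(dist_array < 5.0 * u.arcsec)[0]
print(src_list)   # indices of sources inside the search radius -> [0 1]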
Example #41
0
def process(inpath, args, pool=None, pool_timeout=300):
    """Process a single GFA exposure.
    """
    global GFA
    if not inpath.exists():
        logging.error('Non-existent path: {0}'.format(inpath))
        return
    if args.dry_run:
        print(f'{inpath}')
        return
    # Is this a skycam exposure?
    if inpath.name.startswith('sky'):
        process_sky(inpath, args.outpath)
        return
    # Is this a guiding exposure?
    guiding = inpath.name.startswith('guide')
    # Lookup the NIGHT, EXPID, EXPTIME from the primary header.
    hdr_ext = 'GUIDER' if guiding else 'GFA'
    hdr = fitsio.read_header(str(inpath), ext=hdr_ext)
    for k in 'NIGHT', 'EXPID', 'EXPTIME':
        if k not in hdr:
            logging.info('Skipping exposure with missing {0}: {1}'.format(
                k, inpath))
            return
    cameras_key = 'ACQCAM' if guiding else 'IMAGECAM'
    cameras = hdr.get(cameras_key)
    if cameras is None:
        logging.info('Skipping exposure with missing {0}/{1}: {2}'.format(
            hdr_ext, cameras_key, inpath))
        return
    if cameras == 'GUIDE0,FOCUS1,GUIDE2,GUIDE3,FOCUS4,GUIDE5,FOCUS6,GUIDE7,GUIDE8,FOCUS':
        # The full list of 10 cameras exceeds the 71-char max length in the FITS standard.
        # https://heasarc.gsfc.nasa.gov/docs/software/fitsio/c/c_user/node20.html
        logging.warning('Patching {0}/{1} keyword value.'.format(
            hdr_ext, cameras_key))
        cameras = 'GUIDE0,FOCUS1,GUIDE2,GUIDE3,FOCUS4,GUIDE5,FOCUS6,GUIDE7,GUIDE8,FOCUS9'
    if cameras == 'G0,G2,G3,G5,G7,G8,F1,F4,F6,F9':
        # Data after the Nov-2020 restart needs this for GFA FITS files.
        cameras = 'GUIDE0,FOCUS1,GUIDE2,GUIDE3,FOCUS4,GUIDE5,FOCUS6,GUIDE7,GUIDE8,FOCUS9'
    night = str(hdr['NIGHT'])
    expid = '{0:08d}'.format(hdr['EXPID'])
    if hdr['EXPTIME'] == 0:
        logging.info('Skipping zero EXPTIME: {0}/{1}'.format(night, expid))
        return
    stars_expected = {}
    if guiding and args.guide_stars:
        assert GMM is not None, 'GMM not initialized.'
        PlateMaker, GuiderExpected = None, None
        try:
            GuiderExpected, _, _ = load_guider_centroids(inpath.parent, expid)
        except (ValueError, KeyError):
            logging.warning('Guider centroids json file not readable.')
        try:
            PlateMaker = fitsio.read(str(inpath), ext='PMGSTARS')
        except IOError as e:
            logging.warning('PMGSTARS extension not found.')
        if PlateMaker is not None:
            # Use PlateMaker (row, col) for expected positions of each guide star.
            for camera in np.unique(PlateMaker['GFA_LOC']):
                stars = PlateMaker[PlateMaker['GFA_LOC'] == camera]
                stars_expected[camera] = np.array(
                    (stars['COL'], stars['ROW'], stars['MAG'])).T
        elif GuiderExpected is not None:
            # Fall back to guider centroids.  I assume the JSON file uses the same coordinate
            # convention as PlateMaker since it copies the PlateMaker values when both are present.
            for camera in GuiderExpected:
                nstars = len(GuiderExpected[camera])
                if nstars > 0:
                    stars_expected[camera] = np.array(
                        (GuiderExpected[camera][:, 0],
                         GuiderExpected[camera][:, 1], np.zeros(nstars))).T
        else:
            logging.warning(f'No guide stars available for {night}/{expid}.')
    # Prepare the output path.
    outpath = args.outpath / night / expid
    outpath.mkdir(parents=True, exist_ok=True)
    # Are there already existing outputs?
    fitspath = outpath / 'gfaetc_{0}.fits'.format(expid)
    if fitspath.exists() and not args.overwrite:
        logging.info('Will not overwrite outputs in {0}'.format(outpath))
        return
    # Process each camera in the input.
    logging.info('Processing {0} from {1}'.format(cameras, inpath))
    results = {}
    framepath = outpath if args.save_frames else None
    only_cameras = [] if args.only_cameras is None else args.only_cameras.split(
        ',')
    for camera in cameras.split(','):
        if camera not in GFA.gfa_names:
            logging.warning(
                'Ignoring invalid camera name in header: "{0}".'.format(
                    camera))
            continue
        if len(only_cameras) > 0 and camera not in only_cameras:
            logging.info('Skipping {0} because only-cameras is "{1}".'.format(
                camera, args.only_cameras))
            continue
        if guiding and camera.startswith('FOCUS'):
            # Guiding exposures do not record FOCUS data.
            continue
        # Fetch this camera's CCD temperatures and exposure times.
        if guiding:
            info = fitsio.read(str(inpath),
                               ext=camera + 'T',
                               columns=('MJD-OBS', 'EXPTIME', 'GCCDTEMP'))
            # stars might be None if --guide-stars arg not present or no guide stars available.
            stars = stars_expected.get(camera, None)
        else:
            info = fitsio.read_header(str(inpath), ext=camera)
            stars = None
        mjdobs = info['MJD-OBS']
        exptime = info['EXPTIME']
        ccdtemp = info['GCCDTEMP']
        process_args = (inpath, night, expid, guiding, camera, mjdobs, exptime,
                        ccdtemp, framepath, stars, args.max_dither,
                        args.num_dither)
        if pool is None:
            result = process_one(*process_args)
            if result is None:
                logging.error('Error processing HDU {0}'.format(camera))
            else:
                results[camera] = result
        else:
            results[camera] = pool.apply_async(process_one, process_args)
    if pool:
        # Collect the pooled results.
        for camera in results:
            try:
                result = results[camera].get(timeout=pool_timeout)
                if result is None:
                    logging.error(
                        'Error pool processing HDU {0}'.format(camera))
                else:
                    results[camera] = result
            except TimeoutError:
                logging.error(
                    'Timeout waiting for {0} pool result'.format(camera))
                del results[camera]
    # Save the output FITS file.
    with fitsio.FITS(str(fitspath), 'rw', clobber=True) as hdus:
        meta = {
            k: hdr.get(k)
            for k in ('NIGHT', 'EXPID', 'MJD-OBS', 'EXPTIME', 'PROGRAM',
                      'HEXPOS', 'TRUSTEMP', 'ADC1PHI', 'ADC2PHI', 'MOUNTHA',
                      'MOUNTAZ', 'MOUNTEL', 'MOUNTDEC')
        }
        hdus.write(np.zeros(1), header=meta)
        for camera in results:
            # Retrieve the result of processing the initial image and any subsequent guide frames.
            initial, frames = results[camera]
            if camera.startswith('GUIDE'):
                hdus.write(np.stack(initial).astype(np.float32),
                           extname=camera)
            else:
                L, R = initial
                if L is not None:
                    hdus.write(np.stack(L).astype(np.float32),
                               extname=camera + 'L')
                if R is not None:
                    hdus.write(np.stack(R).astype(np.float32),
                               extname=camera + 'R')
            if frames is not None:
                (Dsum, WDsum, Msum, fit_params, gmm_params) = frames
                hdus.write(np.stack((Dsum, WDsum, Msum)).astype(np.float32),
                           extname=camera + 'G')
                hdus.write(fit_params.astype(np.float32), extname=camera + 'P')
                hdus.write(gmm_params.astype(np.float32), extname=camera + 'M')
    try:
        # Produce a summary plot of the delivered image quality measured from the first image.
        fig = plot_image_quality(
            {camera: result[0]
             for camera, result in results.items()}, meta)
        # Save the summary plot.
        figpath = outpath / 'gfadiq_{0}.png'.format(expid)
        plt.savefig(figpath)
        plt.close(fig)
        logging.info('Wrote {0}'.format(figpath))
    except Exception as e:
        logging.warning('Failed to create image quality plot.')
Example #42
0
    print('Number of pixels below two-thirds of the average population: {}'.format(np.sum(low)))
    wl = np.where(low)[0]
    weights = np.ones(len(np.unique(pix)))
    weights[wl] = np.round(histpix[p][wl]/avgpop, 2)
#     print(len(weights),len(low),len(histpix),weights)
    upix = np.unique(pix)
    return pix,upix, weights


###########################################################
nside = 8
path = '/utahsystem/July2020/'
data = 'eBOSS_LRG_NGC_pip_v7_2_new.dat.fits'
random = 'eBOSS_LRG_NGC_pip_v7_2_ran_withS.fits'

d = fitsio.read(path+data)
dra = d['RA']
ddec = d['DEC']

r = fitsio.read(path+random)
rra = r['RA']
rdec = r['DEC']

pix, uniqpix, weights = get_pixnum(rra,rdec,nside)

picut, pimax = 60, 60



###################################
chunks = glob(path+'/RR_NGC_LRG_z0.6_z1.0_downsampled/*.fits')
Example #43
0
def _cached_catalog_read(fname):
    return fitsio.read(fname)
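
The name suggests the result is memoized, but any caching decorator appears to have been stripped in extraction; a hedged reconstruction using functools.lru_cache (an assumption, not necessarily the project's original mechanism):

# Assumed reconstruction: memoize reads by file name so repeated calls
# for the same catalog hit memory instead of disk.
import functools
import fitsio

@functools.lru_cache(maxsize=None)
def _cached_catalog_read(fname):
    return fitsio.read(fname)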
Example #44
0
save_dir = os.path.join(os.getcwd(), cfg['output']['save_dir'])
if not os.path.exists(save_dir):
    os.mkdir(save_dir)

log_dir = os.path.join(os.getcwd(), cfg['output']['save_dir'], cfg['output']['log_dir'])
if not os.path.exists(log_dir):
    os.mkdir(log_dir)

try:
    sig_cut = float(sys.argv[1])
except:
    sig_cut = 5.5

print('Plotting hotspots with sig > {}'.format(sig_cut))

candidate_list = fits.read(candidate_list)
try: # simple
    candidate_list = candidate_list[candidate_list['SIG'] > sig_cut]
except: # ugali
    candidate_list = candidate_list[candidate_list['TS'] > 25]

# for PS1
#candidate_list = candidate_list[candidate_list['DEC'] > -15]

print('{} candidates found...'.format(len(candidate_list)))

############################################################

#for candidate in [candidate_list[:10]]:
for candidate in candidate_list:
    try: # simple
Example #45
0
def run(self):
    outdir=mkdir(self.config['output']['simdir'])
    logdir=mkdir(join(outdir,'log'))

    # Actually copy config instead of re-writing
    shutil.copy(self.config.filename,outdir)
    configfile = join(outdir,os.path.basename(self.config.filename))

    if 'simulate' in self.opts.run:
        logger.info("Running 'simulate'...")

        if self.opts.num is None: self.opts.num = self.config['simulator']['njobs']
        for i in range(self.opts.num):
            outfile=join(outdir,self.config['output']['simfile']%i)
            base = splitext(os.path.basename(outfile))[0]
            logfile=join(logdir,base+'.log')
            jobname=base
            script = self.config['simulator']['script']
            cmd='%s %s %s --seed %i'%(script,configfile,outfile,i)
            #cmd='%s %s %s'%(script,self.opts.config,outfile)
            self.batch.submit(cmd,jobname,logfile)
            time.sleep(0.1)

    if 'analyze' in self.opts.run:
        logger.info("Running 'analyze'...")
        dirname = self.config['simulate']['dirname']
        catfiles = sorted(glob.glob(join(dirname,self.config['simulate']['catfile'])))
        popfile = join(dirname,self.config['simulate']['popfile'])
        batch = self.config['simulate']['batch']

        for i,catfile in enumerate(catfiles):
            basename = os.path.basename(catfile)
            outfile = join(outdir,basename)
            base = splitext(os.path.basename(outfile))[0]
            logfile=join(logdir,base+'.log')
            jobname=base

            if exists(outfile) and not self.opts.force:
                msg = "Found %s;"%outfile
                if exists(logfile) and len(self.batch.bfail(logfile)):
                    msg += " failed."
                    logger.info(msg)
                else:
                    msg += " skipping..."
                    logger.info(msg)
                    continue
                    
            script = self.config['simulate']['script']
            cmd='%s %s -m 0 --rerun -p %s -c %s -o %s'%(script,configfile,popfile,catfile,outfile)
            self.batch.max_jobs = batch.get('max_jobs',200)
            opts = batch.get(self.opts.queue,dict())
            self.batch.submit(cmd,jobname,logfile,**opts)
            time.sleep(0.1)
        
    if 'sensitivity' in self.opts.run:
        logger.info("Running 'sensitivity'...")

    if 'merge' in self.opts.run:
        logger.info("Running 'merge'...")

        filenames=join(outdir,self.config['simulate']['catfile'])
        infiles=sorted(glob.glob(filenames))
        print("Reading %i files..."%len(infiles))
        data = np.concatenate([fitsio.read(f,ext=1) for f in infiles])
        hdr = fitsio.read_header(infiles[0],ext=1)

        outfile = "./merged_sims.fits"
        logger.info("Writing %s..."%outfile)
        fitsio.write(outfile,data,header=hdr,clobber=True)
        
    if 'plot' in self.opts.run:
        logger.info("Running 'plot'...")
        import ugali.utils.plotting
        import pylab as plt

        plotdir = mkdir(self.config['output']['plotdir'])

        data = fitsio.read(join(outdir,"merged_sims.fits"))
        data = data[~np.isnan(data['ts'])]
        
        bigfig,bigax = plt.subplots()
        
        for dist in np.unique(data['fit_distance']):
            logger.info('  Plotting distance: %s'%dist)
            ts = data['ts'][data['fit_distance'] == dist]
            ugali.utils.plotting.drawChernoff(bigax,ts,bands='none',color='gray')
            
            fig,ax = plt.subplots(1,2,figsize=(10,5))
            ugali.utils.plotting.drawChernoff(ax[0],ts,bands='none',pdf=True)
            ugali.utils.plotting.drawChernoff(ax[1],ts)
            fig.suptitle(r'Chernoff ($\mu = %g$)'%dist)
            ax[0].annotate(r"$N=%i$"%len(ts), xy=(0.15,0.85), xycoords='axes fraction', 
                           bbox={'boxstyle':"round",'fc':'1'})
            basename = 'chernoff_u%g.png'%dist
            outfile = os.path.join(plotdir,basename)
            plt.savefig(outfile)
        bigfig.suptitle('Chernoff!')
        basename = 'chernoff_all.png'
        outfile = os.path.join(plotdir,basename)
        plt.savefig(outfile)

        #idx=np.random.randint(len(data['ts'])-1,size=400)
        #idx=slice(400)
        #ugali.utils.plotting.plotChernoff(data['ts'][idx])
        #ugali.utils.plotting.plotChernoff(data['fit_ts'])
        plt.ion()
        """
Example #46
0
        -c <couchbase config>   - default environment COUCHBASE_BACKEND_CFG 
        -i - reinitialize the object id counter so that the next object will be given object id = 1
    """

    opts, args = getopt.getopt(sys.argv[1:], "n:h?c:i")
    opts = dict(opts)
    if '-h' in opts or '-?' in opts or len(args) != 3:
        print Usage
        sys.exit(1)

    init_oid = "-i" in opts
    config = opts.get("-c")
    group_size = int(opts.get("-n", 100000))

    bucket, dataset, path = args

    data = fitsio.read(path)
    print "%d objects in the input file %s" % (len(data), path)
    backend = CouchBaseBackend(bucket)

    if init_oid:
        counter_key = "%s:@bliss_next_object_id" % (dataset, )
        try:
            backend.delete([counter_key])  # remove if exists
        except:
            pass
        backend.counter(counter_key, initial=1)
        print "Counter bliss_next_object_id initialized to 1"

    add_objects(backend, data, dataset, group_size)
Example #47
0
def test_kkk():
    # Use kappa(r) = A exp(-r^2/2s^2)
    #
    # The Fourier transform is: kappa~(k) = 2 pi A s^2 exp(-s^2 k^2/2) / L^2
    #
    # B(k1,k2) = <k~(k1) k~(k2) k~(-k1-k2)>
    #          = (2 pi A (s/L)^2)^3 exp(-s^2 (|k1|^2 + |k2|^2 - k1.k2))
    #          = (2 pi A (s/L)^2)^3 exp(-s^2 (|k1|^2 + |k2|^2 + |k3|^2)/2)
    #
    # zeta(r1,r2) = (1/2pi)^4 int(d^2k1 int(d^2k2 exp(ik1.x1) exp(ik2.x2) B(k1,k2) ))
    #             = 2/3 pi A^3 (s/L)^2 exp(-(x1^2 + y1^2 + x2^2 + y2^2 - x1x2 - y1y2)/3s^2)
    #             = 2/3 pi A^3 (s/L)^2 exp(-(d1^2 + d2^2 + d3^2)/6s^2)

    A = 0.05
    s = 10.
    if __name__ == '__main__':
        ngal = 200000
        L = 30. * s  # Not infinity, so this introduces some error.  Our integrals were to infinity.
        tol_factor = 1
    else:
        # Looser tests from nosetests that don't take so long to run.
        ngal = 10000
        L = 20. * s
        tol_factor = 5
    rng = np.random.RandomState(8675309)
    x = (rng.random_sample(ngal)-0.5) * L
    y = (rng.random_sample(ngal)-0.5) * L
    r2 = (x**2 + y**2)/s**2
    kappa = A * np.exp(-r2/2.)

    min_sep = 11.
    max_sep = 15.
    nbins = 3
    min_u = 0.7
    max_u = 1.0
    nubins = 3
    min_v = 0.1
    max_v = 0.3
    nvbins = 2

    cat = treecorr.Catalog(x=x, y=y, k=kappa, x_units='arcmin', y_units='arcmin')
    kkk = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                  min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                                  nubins=nubins, nvbins=nvbins,
                                  sep_units='arcmin', verbose=1)
    kkk.process(cat)

    # log(<d>) != <logd>, but it should be close:
    print('meanlogd1 - log(meand1) = ',kkk.meanlogd1 - np.log(kkk.meand1))
    print('meanlogd2 - log(meand2) = ',kkk.meanlogd2 - np.log(kkk.meand2))
    print('meanlogd3 - log(meand3) = ',kkk.meanlogd3 - np.log(kkk.meand3))
    print('meand3 / meand2 = ',kkk.meand3 / kkk.meand2)
    print('meanu = ',kkk.meanu)
    print('max diff = ',np.max(np.abs(kkk.meand3/kkk.meand2 -kkk.meanu)))
    print('max rel diff = ',np.max(np.abs((kkk.meand3/kkk.meand2 -kkk.meanu)/kkk.meanu)))
    print('(meand1 - meand2)/meand3 = ',(kkk.meand1-kkk.meand2) / kkk.meand3)
    print('meanv = ',kkk.meanv)
    print('max diff = ',np.max(np.abs((kkk.meand1-kkk.meand2)/kkk.meand3 -np.abs(kkk.meanv))))
    print('max rel diff = ',np.max(np.abs(((kkk.meand1-kkk.meand2)/kkk.meand3-np.abs(kkk.meanv))
                                          / kkk.meanv)))
    np.testing.assert_allclose(kkk.meanlogd1, np.log(kkk.meand1), rtol=1.e-3)
    np.testing.assert_allclose(kkk.meanlogd2, np.log(kkk.meand2), rtol=1.e-3)
    np.testing.assert_allclose(kkk.meanlogd3, np.log(kkk.meand3), rtol=1.e-3)
    np.testing.assert_allclose(kkk.meand3/kkk.meand2, kkk.meanu, rtol=1.e-5 * tol_factor)
    np.testing.assert_allclose(np.abs(kkk.meand1-kkk.meand2)/kkk.meand3, np.abs(kkk.meanv),
                               rtol=1.e-5 * tol_factor, atol=1.e-5 * tol_factor)
    np.testing.assert_allclose(kkk.meanlogd3-kkk.meanlogd2, np.log(kkk.meanu),
                               atol=1.e-3 * tol_factor)
    np.testing.assert_allclose(np.log(np.abs(kkk.meand1-kkk.meand2))-kkk.meanlogd3,
                               np.log(np.abs(kkk.meanv)), atol=2.e-3 * tol_factor)

    d1 = kkk.meand1
    d2 = kkk.meand2
    d3 = kkk.meand3
    #print('rnom = ',np.exp(kkk.logr))
    #print('unom = ',kkk.u)
    #print('vnom = ',kkk.v)
    #print('d1 = ',d1)
    #print('d2 = ',d2)
    #print('d3 = ',d3)
    # The L^2 term in the denominator of true_zeta is the area over which the integral is done.
    # Since the centers of the triangles don't go to the edge of the box, we approximate the
    # correct area by subtracting off 2d2 from L, which should give a slightly better estimate
    # of the correct area to use here.
    L = L - 2.*d2
    true_zeta = (2.*np.pi/3) * A**3 * (s/L)**2 * np.exp(-(d1**2+d2**2+d3**2)/(6.*s**2))

    #print('ntri = ',kkk.ntri)
    print('zeta = ',kkk.zeta)
    print('true_zeta = ',true_zeta)
    #print('ratio = ',kkk.zeta / true_zeta)
    #print('diff = ',kkk.zeta - true_zeta)
    print('max rel diff = ',np.max(np.abs((kkk.zeta - true_zeta)/true_zeta)))
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=0.1 * tol_factor)
    np.testing.assert_allclose(np.log(np.abs(kkk.zeta)), np.log(np.abs(true_zeta)),
                               atol=0.1 * tol_factor)

    # Check that we get the same result using the corr3 function:
    cat.write(os.path.join('data','kkk_data.dat'))
    config = treecorr.config.read_config('configs/kkk.yaml')
    config['verbose'] = 0
    treecorr.corr3(config)
    corr3_output = np.genfromtxt(os.path.join('output','kkk.out'), names=True, skip_header=1)
    np.testing.assert_almost_equal(corr3_output['zeta'], kkk.zeta.flatten())

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check the fits write option
    out_file_name = os.path.join('output','kkk_out.fits')
    kkk.write(out_file_name)
    data = fitsio.read(out_file_name)
    np.testing.assert_almost_equal(data['r_nom'], np.exp(kkk.logr).flatten())
    np.testing.assert_almost_equal(data['u_nom'], kkk.u.flatten())
    np.testing.assert_almost_equal(data['v_nom'], kkk.v.flatten())
    np.testing.assert_almost_equal(data['meand1'], kkk.meand1.flatten())
    np.testing.assert_almost_equal(data['meanlogd1'], kkk.meanlogd1.flatten())
    np.testing.assert_almost_equal(data['meand2'], kkk.meand2.flatten())
    np.testing.assert_almost_equal(data['meanlogd2'], kkk.meanlogd2.flatten())
    np.testing.assert_almost_equal(data['meand3'], kkk.meand3.flatten())
    np.testing.assert_almost_equal(data['meanlogd3'], kkk.meanlogd3.flatten())
    np.testing.assert_almost_equal(data['meanu'], kkk.meanu.flatten())
    np.testing.assert_almost_equal(data['meanv'], kkk.meanv.flatten())
    np.testing.assert_almost_equal(data['zeta'], kkk.zeta.flatten())
    np.testing.assert_almost_equal(data['sigma_zeta'], np.sqrt(kkk.varzeta.flatten()))
    np.testing.assert_almost_equal(data['weight'], kkk.weight.flatten())
    np.testing.assert_almost_equal(data['ntri'], kkk.ntri.flatten())

    # Check the read function
    # Note: These don't need the flatten. The read function should reshape them to the right shape.
    kkk2 = treecorr.KKKCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins,
                                   min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v,
                                   nubins=nubins, nvbins=nvbins,
                                   sep_units='arcmin', verbose=1)
    kkk2.read(out_file_name)
    np.testing.assert_almost_equal(kkk2.logr, kkk.logr)
    np.testing.assert_almost_equal(kkk2.u, kkk.u)
    np.testing.assert_almost_equal(kkk2.v, kkk.v)
    np.testing.assert_almost_equal(kkk2.meand1, kkk.meand1)
    np.testing.assert_almost_equal(kkk2.meanlogd1, kkk.meanlogd1)
    np.testing.assert_almost_equal(kkk2.meand2, kkk.meand2)
    np.testing.assert_almost_equal(kkk2.meanlogd2, kkk.meanlogd2)
    np.testing.assert_almost_equal(kkk2.meand3, kkk.meand3)
    np.testing.assert_almost_equal(kkk2.meanlogd3, kkk.meanlogd3)
    np.testing.assert_almost_equal(kkk2.meanu, kkk.meanu)
    np.testing.assert_almost_equal(kkk2.meanv, kkk.meanv)
    np.testing.assert_almost_equal(kkk2.zeta, kkk.zeta)
    np.testing.assert_almost_equal(kkk2.varzeta, kkk.varzeta)
    np.testing.assert_almost_equal(kkk2.weight, kkk.weight)
    np.testing.assert_almost_equal(kkk2.ntri, kkk.ntri)
    assert kkk2.coords == kkk.coords
    assert kkk2.metric == kkk.metric
    assert kkk2.sep_units == kkk.sep_units
    assert kkk2.bin_type == kkk.bin_type
Example #48
0
    def calculate(self, infile, field=1, simple=False):
        logger.info("Calculating magnitude limit from %s" % infile)

        #manglefile = self.config['mangle']['infile_%i'%field]
        #footfile = self.config['data']['footprint']
        #try:
        #    footprint = fitsio.read(footfile)['I'].ravel()
        #except:
        #    logger.warn("Couldn't open %s; will try again."%footfile)
        #    footprint = footfile

        mag_column = self.config['catalog']['mag_%i_field' % field]
        magerr_column = self.config['catalog']['mag_err_%i_field' % field]

        # For simple maglims
        release = self.config['data']['release'].lower()
        band = self.config['catalog']['mag_%i_band' % field]
        pixel_pix_name = 'PIX%i' % self.nside_pixel

        # If the data already has a healpix pixel assignment then use it
        # Otherwise recalculate...
        try:
            data = fitsio.read(infile, columns=[pixel_pix_name])
        except ValueError as e:
            logger.info(str(e))
            columns = [
                self.config['catalog']['lon_field'],
                self.config['catalog']['lat_field']
            ]
            data = fitsio.read(infile, columns=columns)[columns]
            pix = ang2pix(self.nside_pixel, data[columns[0]], data[columns[1]])
            data = recfuncs.rec_append_fields(data, pixel_pix_name, pix)

        #mask_pixels = np.arange( hp.nside2npix(self.nside_mask), dtype='int')
        mask_maglims = np.zeros(hp.nside2npix(self.nside_mask))

        out_pixels = np.zeros(0, dtype='int')
        out_maglims = np.zeros(0)

        # Find the objects in each pixel
        pixel_pix = data[pixel_pix_name]
        mask_pix = ugali.utils.skymap.superpixel(pixel_pix, self.nside_pixel,
                                                 self.nside_mask)
        count = Counter(mask_pix)
        pixels = sorted(count.keys())
        pix_digi = np.digitize(mask_pix, pixels).argsort()
        idx = 0
        min_num = 500
        signal_to_noise = 10.
        magerr_lim = 1 / signal_to_noise
        for pix in pixels:
            # Calculate the magnitude limit in each pixel
            num = count[pix]
            objs = data[pix_digi[idx:idx + num]]
            idx += num
            if simple:
                # Set constant magnitude limits
                logger.debug("Simple magnitude limit for %s" % infile)
                mask_maglims[pix] = MAGLIMS[release][band]
            elif num < min_num:
                logger.info('Found <%i objects in pixel %i' % (min_num, pix))
                mask_maglims[pix] = 0
            else:
                mag = objs[mag_column]
                magerr = objs[magerr_column]
                # Estimate the magnitude limit as suggested by:
                # https://deswiki.cosmology.illinois.edu/confluence/display/DO/SVA1+Release+Document
                # (https://desweb.cosmology.illinois.edu/confluence/display/Operations/SVA1+Doc)
                maglim = np.median(mag[(magerr > 0.9 * magerr_lim)
                                       & (magerr < 1.1 * magerr_lim)])

                # Alternative method to estimate the magnitude limit by fitting median
                #mag_min, mag_max = mag.min(),mag.max()
                #mag_bins = np.arange(mag_min,mag_max,0.1) #0.1086?
                #x,y = ugali.utils.binning.binnedMedian(mag,magerr,mag_bins)
                #x,y = x[~np.isnan(y)],y[~np.isnan(y)]
                #magerr_med = interp1d(x,y)
                #mag0 = np.median(x)
                #maglim = brentq(lambda a: magerr_med(a)-magerr_lim,x.min(),x.max(),disp=False)
                # Median from just objects near magerr cut

                mask_maglims[pix] = maglim

            logger.debug("%i (n=%i): maglim=%g" %
                         (pix, num, mask_maglims[pix]))
            subpix = ugali.utils.skymap.subpixel(pix, self.nside_mask,
                                                 self.nside_pixel)
            maglims = np.zeros(len(subpix)) + mask_maglims[pix]
            out_pixels = np.append(out_pixels, subpix)
            out_maglims = np.append(out_maglims, maglims)

        # Remove empty pixels
        logger.info("Removing empty pixels")
        idx = np.nonzero(out_maglims > 0)[0]
        out_pixels = out_pixels[idx]
        out_maglims = out_maglims[idx]

        # Remove pixels outside the footprint
        if self.footfile:
            logger.info("Checking footprint against %s" % self.footfile)
            lon, lat = pix2ang(self.nside_pixel, out_pixels)
            if self.config['coords']['coordsys'] == 'gal':
                ra, dec = gal2cel(lon, lat)
            else:
                ra, dec = lon, lat
            footprint = inFootprint(self.footprint, ra, dec)
            idx = np.nonzero(footprint)[0]
            out_pixels = out_pixels[idx]
            out_maglims = out_maglims[idx]

        logger.info("MAGLIM = %.3f +/- %.3f" %
                    (np.mean(out_maglims), np.std(out_maglims)))
        return out_pixels, out_maglims
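
# A minimal standalone sketch of the estimator above (simple_maglim is a
# hypothetical helper, not part of the original class): for a target
# signal-to-noise S/N the magnitude error is sigma_m = 2.5/(ln(10)*S/N)
# ~= 1.086/(S/N), and magerr_lim = 1/(S/N) is used as a convenient proxy;
# the median magnitude of objects near that error approximates the depth
# at that S/N.
import numpy as np

def simple_maglim(mag, magerr, snr=10.0, tol=0.1):
    """Median magnitude of objects with magerr within +/-tol of 1/snr."""
    magerr_lim = 1.0 / snr
    sel = (magerr > (1 - tol) * magerr_lim) & (magerr < (1 + tol) * magerr_lim)
    return np.median(mag[sel])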
Example #49
0
def main(config, outpath, magflag, clobber=False):

    files = config['Runtime']['outpath']
    files = glob(files)
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    files = files[rank::size]

    cc = config['Cosmology']
    nb_config = config['NBody']

    cosmo = Cosmology(**cc)

    domain = Domain(cosmo, **nb_config.pop('Domain'))
    domain.decomp(comm, comm.rank, comm.size)
    d = domain.dummyDomain()
    nbody = NBody(cosmo, d, **nb_config)

    model = ADDGALSModel(nbody, **config['GalaxyModel']['ADDGALSModel'])
    filters = config['GalaxyModel']['ADDGALSModel']['colorModelConfig'][
        'filters']
    train = fitsio.read(config['GalaxyModel']['ADDGALSModel']
                        ['colorModelConfig']['trainingSetFile'])
    nk = len(filters)

    for f in files:

        pixnum = f.split('.')[-2]
        fname = '{}-{}.{}.fits'.format(outpath, magflag, pixnum)
        if os.path.exists(fname) & (not clobber):
            continue

        g = fitsio.read(f,
                        columns=[
                            'SEDID', 'Z', 'MAG_R_EVOL', 'MU', 'Z_COS', 'VX',
                            'VY', 'VZ', 'PX', 'PY', 'PZ'
                        ])
        ngal = len(g)

        pos = np.zeros((ngal, 3))
        vel = np.zeros((ngal, 3))
        pos[:, 0] = g['PX']
        pos[:, 1] = g['PY']
        pos[:, 2] = g['PZ']
        vel[:, 0] = g['VX']
        vel[:, 1] = g['VY']
        vel[:, 2] = g['VZ']

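        # Radial (line-of-sight) peculiar velocity v_r = (pos . vel)/|pos| in
        # km/s, then the RSD-shifted redshift z_rsd = z_cos + v_r*(1+z_cos)/c
        # with c = 299792.458 km/s.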
        v_r = np.sum(pos * vel, axis=1) / np.sqrt(np.sum(pos**2, axis=1))
        z_rsd = g['Z_COS'] + v_r * (1 + g['Z_COS']) / 299792.458
        del pos, vel, v_r

        m_r = np.copy(g['MAG_R_EVOL'])
        mu = np.copy(g['MU'])
        coeffs = train['COEFFS'][g['SEDID']]

        del g

        mags = np.zeros(ngal,
                        dtype=np.dtype([('TMAG', (float, nk)),
                                        ('AMAG', (float, nk)),
                                        ('LMAG', (float, nk)),
                                        ('OMAG', (float, nk)),
                                        ('OMAGERR', (float, nk)),
                                        ('FLUX', (float, nk)),
                                        ('IVAR', (float, nk)),
                                        ('Z', float)]))
        mags['Z'] = z_rsd
        z_a = copy(mags['Z'])
        z_a[z_a < 1e-6] = 1e-6

        mags['TMAG'], mags['AMAG'] = model.colorModel.computeMagnitudes(
            m_r, z_rsd, coeffs, filters)

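        # Lensed apparent magnitudes: magnification mu scales flux by mu,
        # i.e. shifts magnitudes by -2.5*log10(mu).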
        for i in range(len(filters)):
            mags['LMAG'][:, i] = mags['TMAG'][:, i] - 2.5 * np.log10(mu)

        fitsio.write(fname, mags, clobber=clobber)
Example #50
0
    def read_SV_fits(self):
        import fitsio

        subset_photoz_bin = False

        if subset_photoz_bin:
            # Subset to a single photo-z bin
            photoz_bin = 0

            Dataset.__init__(self, desfilename,
                             "DESData_colordiff_bin" + str(photoz_bin), '')

        data = fitsio.read(self.filename)

        # Mask out the bad objects
        SVA1_FLAG_mask = (data['SVA1_FLAG'] == 0)
        NGMIX_FLAG_mask = (data['NGMIX_FLAG'] == 0)
        PHOTOZ_FLAG_mask = (data['PHOTOZ_BIN'] > -1)
        data = data[SVA1_FLAG_mask & NGMIX_FLAG_mask & PHOTOZ_FLAG_mask]

        # Read in the desired columns.

        # from SVA_GOLD
        # WLINFO filtered by Umaa to omit objects with
        # SVA1_FLAG != 0
        # NGMIX_FLAG != 0
        # PHOTOZ_BIN != -1

        # We want R, G-R, R-I, I-Z
        self.data = data['MAG_AUTO_R']
        self.features = ['MAG_AUTO_R']

        # G-R
        self.data = np.vstack(
            [self.data, data['MAG_AUTO_G'] - data['MAG_AUTO_R']])
        self.features += ['G-R']

        # R-I
        self.data = np.vstack(
            [self.data, data['MAG_AUTO_R'] - data['MAG_AUTO_I']])
        self.features += ['R-I']

        # I-Z
        self.data = np.vstack(
            [self.data, data['MAG_AUTO_I'] - data['MAG_AUTO_Z']])
        self.features += ['I-Z']

        # MEAN_PHOTOZ
        self.data = np.vstack([self.data, data['MEAN_PHOTOZ']])
        self.features += ['MEAN_PHOTOZ']

        # PHOTOZ_BIN
        self.data = np.vstack([self.data, data['PHOTOZ_BIN']])
        self.features += ['PHOTOZ_BIN']

        # Data is d x n
        print(self.data.shape)
        # Scale some features as needed
        for f in self.features:
            if 'MAG_AUTO' in f:  # subtract the min
                minval = np.min(self.data[self.features.index(f), :])
                self.data[self.features.index(f), :] -= minval
                print('Subtracting %f from %s.' % (minval, f))
                newf = f + '-sub%.2f' % minval
                self.features[self.features.index(f)] = newf
                f = newf
            print('%s range: ' % f,
                  self.data[self.features.index(f), :].min(),
                  self.data[self.features.index(f), :].max())

        self.labels = ['%d_%.6f_%.6f' % (id,ra,dec) for (id,ra,dec) in \
                         zip(data['COADD_OBJECTS_ID'],
                             data['RA'],
                             data['DEC'])]

        self.xvals = np.arange(self.data.shape[0]).reshape(-1, 1)
        self.features = np.array(self.features)

        if subset_photoz_bin:
            # Subset to a single photo-z bin
            keep = (self.data[np.where(self.features == 'PHOTOZ_BIN')[0][0],:] ==  \
                    photoz_bin)
            self.data = self.data[:, keep]
            # Still annoys me that you can't index a list with a list
            self.labels = [self.labels[k] for k in np.where(keep)[0]]

        # Remove the MEAN_PHOTOZ and PHOTOZ_BIN features
        print('Removing PHOTOZ features.')
        features_keep = ((self.features != 'PHOTOZ_BIN') &
                         (self.features != 'MEAN_PHOTOZ'))
        self.data = self.data[features_keep, :]
        self.features = self.features[features_keep]
        self.xvals = np.arange(self.data.shape[0]).reshape(-1, 1)
Example #51
0
def test_kg():
    # Use gamma_t(r) = gamma0 exp(-r^2/2r0^2) around a bunch of foreground lenses.
    # i.e. gamma(r) = -gamma0 exp(-r^2/2r0^2) (x+iy)^2/r^2
    # For each lens, we divide this by a random kappa value assigned to that lens, so
    # the final kg output should be just gamma_t.

    nlens = 1000
    nsource = 30000
    r0 = 10.
    L = 50. * r0

    gamma0 = 0.05
    np.random.seed(8675309)
    xl = (np.random.random_sample(nlens)-0.5) * L
    yl = (np.random.random_sample(nlens)-0.5) * L
    kl = np.random.normal(0.23, 0.05, (nlens,) )
    xs = (np.random.random_sample(nsource)-0.5) * L
    ys = (np.random.random_sample(nsource)-0.5) * L
    g1 = np.zeros( (nsource,) )
    g2 = np.zeros( (nsource,) )
    for x,y,k in zip(xl,yl,kl):
        dx = xs-x
        dy = ys-y
        r2 = dx**2 + dy**2
        gammat = gamma0 * np.exp(-0.5*r2/r0**2) / k
        g1 += -gammat * (dx**2-dy**2)/r2
        g2 += -gammat * (2.*dx*dy)/r2
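        # g1 and g2 are the real and imaginary parts of
        # -gamma_t*(dx + i*dy)**2/r**2, i.e. the tangential shear pattern
        # given in the docstring.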

    lens_cat = treecorr.Catalog(x=xl, y=yl, k=kl, x_units='arcmin', y_units='arcmin')
    source_cat = treecorr.Catalog(x=xs, y=ys, g1=g1, g2=g2, x_units='arcmin', y_units='arcmin')
    kg = treecorr.KGCorrelation(bin_size=0.1, min_sep=1., max_sep=20., sep_units='arcmin',
                                verbose=1)
    kg.process(lens_cat, source_cat)

    r = kg.meanr
    true_gt = gamma0 * np.exp(-0.5*r**2/r0**2)

    print('kg.xi = ',kg.xi)
    print('kg.xi_im = ',kg.xi_im)
    print('true_gammat = ',true_gt)
    print('ratio = ',kg.xi / true_gt)
    print('diff = ',kg.xi - true_gt)
    print('max diff = ',max(abs(kg.xi - true_gt)))
    np.testing.assert_allclose(kg.xi, true_gt, rtol=0.1)
    np.testing.assert_allclose(kg.xi_im, 0., atol=1.e-2)

    # Check that we get the same result using the corr2 function:
    lens_cat.write(os.path.join('data','kg_lens.dat'))
    source_cat.write(os.path.join('data','kg_source.dat'))
    config = treecorr.read_config('configs/kg.yaml')
    config['verbose'] = 0
    config['precision'] = 8
    treecorr.corr2(config)
    corr2_output = np.genfromtxt(os.path.join('output','kg.out'), names=True, skip_header=1)
    print('kg.xi = ',kg.xi)
    print('from corr2 output = ',corr2_output['kgamT'])
    print('ratio = ',corr2_output['kgamT']/kg.xi)
    print('diff = ',corr2_output['kgamT']-kg.xi)
    np.testing.assert_allclose(corr2_output['kgamT'], kg.xi, rtol=1.e-3)

    print('xi_im from corr2 output = ',corr2_output['kgamX'])
    np.testing.assert_allclose(corr2_output['kgamX'], 0., atol=1.e-2)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check the fits write option
    out_file_name1 = os.path.join('output','kg_out1.fits')
    kg.write(out_file_name1)
    data = fitsio.read(out_file_name1)
    np.testing.assert_almost_equal(data['R_nom'], np.exp(kg.logr))
    np.testing.assert_almost_equal(data['meanR'], kg.meanr)
    np.testing.assert_almost_equal(data['meanlogR'], kg.meanlogr)
    np.testing.assert_almost_equal(data['kgamT'], kg.xi)
    np.testing.assert_almost_equal(data['kgamX'], kg.xi_im)
    np.testing.assert_almost_equal(data['sigma'], np.sqrt(kg.varxi))
    np.testing.assert_almost_equal(data['weight'], kg.weight)
    np.testing.assert_almost_equal(data['npairs'], kg.npairs)

    # Check the read function
    kg2 = treecorr.KGCorrelation(bin_size=0.1, min_sep=1., max_sep=20., sep_units='arcmin')
    kg2.read(out_file_name1)
    np.testing.assert_almost_equal(kg2.logr, kg.logr)
    np.testing.assert_almost_equal(kg2.meanr, kg.meanr)
    np.testing.assert_almost_equal(kg2.meanlogr, kg.meanlogr)
    np.testing.assert_almost_equal(kg2.xi, kg.xi)
    np.testing.assert_almost_equal(kg2.xi_im, kg.xi_im)
    np.testing.assert_almost_equal(kg2.varxi, kg.varxi)
    np.testing.assert_almost_equal(kg2.weight, kg.weight)
    np.testing.assert_almost_equal(kg2.npairs, kg.npairs)
    assert kg2.coords == kg.coords
    assert kg2.metric == kg.metric
    assert kg2.sep_units == kg.sep_units
    assert kg2.bin_type == kg.bin_type
Example #52
0
def test_direct_spherical():
    # Repeat in spherical coords

    ngal = 50
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) ) + 200  # Put everything at large y, so small angle on sky
    z = rng.normal(0,s, (ngal,) )
    w = rng.random_sample(ngal)
    kap = rng.normal(0,3, (ngal,) )
    w = np.ones_like(w)

    ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)

    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w, k=kap)

    min_sep = 1.
    bin_size = 0.2
    nrbins = 10
    nubins = 5
    nvbins = 5
    kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                  sep_units='deg', brute=True)
    kkk.process(cat)

    r = np.sqrt(x**2 + y**2 + z**2)
    x /= r;  y /= r;  z /= r

    true_ntri = np.zeros((nrbins, nubins, 2*nvbins), dtype=int)
    true_weight = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)
    true_zeta = np.zeros((nrbins, nubins, 2*nvbins), dtype=float)

    rad_min_sep = min_sep * coord.degrees / coord.radians
    for i in range(ngal):
        for j in range(i+1,ngal):
            for k in range(j+1,ngal):
                d12 = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
                d23 = np.sqrt((x[j]-x[k])**2 + (y[j]-y[k])**2 + (z[j]-z[k])**2)
                d31 = np.sqrt((x[k]-x[i])**2 + (y[k]-y[i])**2 + (z[k]-z[i])**2)

                d3, d2, d1 = sorted([d12, d23, d31])
                rindex = np.floor(np.log(d2/rad_min_sep) / bin_size).astype(int)
                if rindex < 0 or rindex >= nrbins: continue

                if [d1, d2, d3] == [d23, d31, d12]: ii,jj,kk = i,j,k
                elif [d1, d2, d3] == [d23, d12, d31]: ii,jj,kk = i,k,j
                elif [d1, d2, d3] == [d31, d12, d23]: ii,jj,kk = j,k,i
                elif [d1, d2, d3] == [d31, d23, d12]: ii,jj,kk = j,i,k
                elif [d1, d2, d3] == [d12, d23, d31]: ii,jj,kk = k,i,j
                elif [d1, d2, d3] == [d12, d31, d23]: ii,jj,kk = k,j,i
                else: assert False
                # Now use ii, jj, kk rather than i,j,k, to get the indices
                # that correspond to the points in the right order.

                u = d3/d2
                v = (d1-d2)/d3
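                # The sign of v encodes triangle orientation: the triple
                # product p_i . [(p_j - p_i) x (p_k - p_i)] below is positive
                # for counterclockwise windings, which get v -> -v.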
                if ( ((x[jj]-x[ii])*(y[kk]-y[ii]) - (x[kk]-x[ii])*(y[jj]-y[ii])) * z[ii] +
                     ((y[jj]-y[ii])*(z[kk]-z[ii]) - (y[kk]-y[ii])*(z[jj]-z[ii])) * x[ii] +
                     ((z[jj]-z[ii])*(x[kk]-x[ii]) - (z[kk]-z[ii])*(x[jj]-x[ii])) * y[ii] ) > 0:
                    v = -v

                uindex = np.floor(u / bin_size).astype(int)
                assert 0 <= uindex < nubins
                vindex = np.floor((v+1) / bin_size).astype(int)
                assert 0 <= vindex < 2*nvbins

                www = w[i] * w[j] * w[k]
                zeta = www * kap[i] * kap[j] * kap[k]

                true_ntri[rindex,uindex,vindex] += 1
                true_weight[rindex,uindex,vindex] += www
                true_zeta[rindex,uindex,vindex] += zeta

    pos = true_weight > 0
    true_zeta[pos] /= true_weight[pos]

    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-4, atol=1.e-6)

    try:
        import fitsio
    except ImportError:
        print('Skipping FITS tests, since fitsio is not installed')
        return

    # Check that running via the corr3 script works correctly.
    config = treecorr.config.read_config('configs/kkk_direct_spherical.yaml')
    cat.write(config['file_name'])
    treecorr.corr3(config)
    data = fitsio.read(config['kkk_file_name'])
    np.testing.assert_allclose(data['r_nom'], kkk.rnom.flatten())
    np.testing.assert_allclose(data['u_nom'], kkk.u.flatten())
    np.testing.assert_allclose(data['v_nom'], kkk.v.flatten())
    np.testing.assert_allclose(data['ntri'], kkk.ntri.flatten())
    np.testing.assert_allclose(data['weight'], kkk.weight.flatten())
    np.testing.assert_allclose(data['zeta'], kkk.zeta.flatten(), rtol=1.e-3)

    # Repeat with binslop = 0
    # And don't do any top-level recursion so we actually test not going to the leaves.
    kkk = treecorr.KKKCorrelation(min_sep=min_sep, bin_size=bin_size, nbins=nrbins,
                                  sep_units='deg', bin_slop=0, max_top=0)
    kkk.process(cat)
    np.testing.assert_array_equal(kkk.ntri, true_ntri)
    np.testing.assert_allclose(kkk.weight, true_weight, rtol=1.e-5, atol=1.e-8)
    np.testing.assert_allclose(kkk.zeta, true_zeta, rtol=1.e-4, atol=1.e-6)
Example #53
0
pixfn      = '/global/cfs/cdirs/desi/target/catalogs/dr9m/0.42.0/pixweight/main/resolve/dark/pixweight-dark.fits'
hdr        = fits.getheader(pixfn,1)
nside,nest = hdr['HPXNSIDE'],hdr['HPXNEST']
print(nside,nest)

R_G=3.214 # http://legacysurvey.org/dr8/catalogs/#galactic-extinction-coefficients
R_R=2.165
R_Z=1.211

dr = '9'

print('test')

if dr == '9':
    #this will be needed no matter the sample, might want more
    rall = fitsio.read('/global/cfs/cdirs/desi/target/catalogs/dr9m/0.42.0/randoms/resolve/randoms-randomized-1.fits')
    print(len(rall))
    sdir = '/project/projectdirs/desi/users/ajross/dr9/'

def mask(dd,mb=[1,5,6,7,11,12,13]):
    keep = (dd['NOBS_G']>0) & (dd['NOBS_R']>0) & (dd['NOBS_Z']>0)
    print(len(dd[keep]))
    
    keepelg = keep
    for bit in mb:
        keepelg &= ((dd['MASKBITS'] & 2**bit)==0)
    print(len(dd[keepelg]))
    dd = dd[keepelg] 
    return dd       

rall = mask(rall)    
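
# A short sketch (standard extinction usage, assumed rather than taken from
# this snippet): the R_G/R_R/R_Z coefficients above convert SFD E(B-V) into
# per-band extinction in magnitudes, A_b = R_b*EBV, i.e. a flux transmission
# factor of 10**(-0.4*R_b*EBV). extinction_corrected_flux is a hypothetical
# helper.
def extinction_corrected_flux(flux, ebv, R_b):
    """De-redden an observed flux given E(B-V) and band coefficient R_b."""
    return flux * 10.0 ** (0.4 * R_b * ebv)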
Example #54
0
def densvsinput_pix(type,parl,wsel,reg=None,ff='targetDR9m42.fits',xlab='',vmin=None,vmax=None,ebvcut=None,edscut=None,sn2cut=None,fpsfcut=None,gfluxcut=None,rfluxcut=None,gbcut=None,nbin=10,weights=None,titl=''):        
    #input custom map/mask
    ft = fitsio.read(sdir+type+ff)
    print(len(ft))
    rl = rall
    if reg:
        wr = rall['PHOTSYS'] == reg
        rl = rl[wr]
        wd = ft['PHOTSYS'] == reg
        ft = ft[wd]
    if gfluxcut:
        wg = ft['FLUX_G']/ft['MW_TRANSMISSION_G'] > gfluxcut
        print(len(ft))      
        ft = ft[wg]
        print(len(ft))
    if rfluxcut:
        wg = ft['FLUX_R']/ft['MW_TRANSMISSION_R'] > rfluxcut
        ft = ft[wg]

    rth,rphi = radec2thphi(rl['RA'],rl['DEC'])
    rpix = hp.ang2pix(nside,rth,rphi,nest=nest)
    dth,dphi = radec2thphi(ft['RA'],ft['DEC'])
    dpix = hp.ang2pix(nside,dth,dphi,nest=nest)
    pixlr = np.zeros(12*nside*nside)
    pixlg = np.zeros(12*nside*nside)

    if weights is None:
        weights = np.ones(len(pixlr))
    for pix in rpix:
        pixlr[pix] += 1.
    print('randoms done')
    for i in range(0,len(dpix)): 
        pix = dpix[i]
        pixlg[pix] += 1.
    
    wp = wsel
    wp &= (pixlr > 0) & (weights*0 == 0)

    parv = fitsio.read(pixfn)
    ebv = parv['EBV']
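    # Each PSFDEPTH_* is an inverse-variance flux depth; extinction dims the
    # flux by 10**(-0.4*R*EBV), so the effective (S/N)^2 scales by that
    # factor squared, hence the *2. in the exponent.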
    sn2tf = 10.**(-0.4*R_G*ebv*2.)*parv['PSFDEPTH_G'] + 10.**(-0.4*R_R*ebv*2.)*parv['PSFDEPTH_R'] + 10.**(-0.4*R_Z*ebv*2.)*parv['PSFDEPTH_Z']
    print(len(parv[wp]))
    if sn2cut:
        wp &= (sn2tf > sn2cut)
        
    if fpsfcut:
        wpsf = ft['MORPHTYPE'] == 'PSF'
        pixlgp = np.zeros(12*nside*nside)
        dpixp = dpix[wpsf]
        for i in range(0,len(dpixp)): 
            pix = dpixp[i]
            pixlgp[pix] += 1.
        fpsf = pixlgp/pixlg
        wp &= (fpsf < fpsfcut)
    if ebvcut:
        wp &= (parv['EBV'] < ebvcut)

    if edscut:
        eds = parv['EBV']/parv['STARDENS']
        wp &= (eds < edscut)

    parv = parl

    wp &= parv != 0
    wp &= parv*0 == 0
    print(len(parv[wp]))
    
    if vmin is None:
        vmin = np.min(parv[wp])
    if vmax is None:
        vmax = np.max(parv[wp])
    parv = parv[wp]
    rh,bn = np.histogram(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp])
    dh,db = np.histogram(parv,bins=bn,weights=pixlg[wp]*weights[wp])
    norm = sum(rh)/sum(dh)
    sv = dh/rh*norm
    ep = np.sqrt(dh)/rh*norm
    bc = []
    for i in range(0,len(bn)-1):
        bc.append((bn[i]+bn[i+1])/2.)

    plt.errorbar(bc,sv-1.,ep,fmt='ko')
    plt.hist(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp]*0.2*np.ones(len(pixlr[wp]))/np.max(rh))
    plt.ylim(-.3,.3)
    plt.xlabel(xlab)
    plt.ylabel('Ngal/<Ngal> - 1')
    plt.title(type+' in '+reg + ' footprint, using pixelized map'+titl)
    plt.show()
    wv = (parv>=vmin) & (parv <=vmax)
    frac = sum(pixlr[wp][~wv])/sum(pixlr[wp])
    print('fraction of randoms not included in plot: '+str(frac))
    return bc,sv,ep
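
# A minimal sketch (assuming healpy as hp, numpy as np, and the NESTED
# convention used above) of the pixel-counting pattern in these functions;
# np.bincount replaces the explicit per-object loops. pixel_counts is a
# hypothetical helper, not part of the original script.
def pixel_counts(ra, dec, nside, nest=True):
    """Object counts per HEALPix pixel from RA/Dec in degrees."""
    pix = hp.ang2pix(nside, ra, dec, lonlat=True, nest=nest)
    return np.bincount(pix, minlength=hp.nside2npix(nside)).astype(float)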
Example #55
0
def test_meanify():

    if __name__ == '__main__':
        rtol = 1.e-1
        atol = 2.e-2
        bin_spacing = 30  # arcsec
    else:
        rtol = 1.e-1
        atol = 3.e-2
        bin_spacing = 150  # arcsec

    psf_file = 'test_mean_*.piff'
    average_file = 'average.fits'

    psfs_list = sorted(glob.glob(os.path.join('output', 'test_mean_*.piff')))

    config0 = {
        'output' : {
            'file_name' : psfs_list,
        },
        'hyper' : {
            'file_name' : 'output/'+average_file,
        }}

    config1 = {
        'output' : {
            'file_name' : psf_file,
            'dir': 'output',
        },
        'hyper' : {
            'file_name' : average_file,
            'dir': 'output',
            'bin_spacing' : bin_spacing,
            'statistic' : 'mean',
            'params_fitted': [0, 2]
        }}

    config2 = {
        'output' : {
            'file_name' : psf_file,
            'dir': 'output',
        },
        'hyper' : {
            'file_name' : average_file,
            'dir': 'output',
            'bin_spacing' : bin_spacing,
            'statistic' : 'median',
        }}
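    # config0 lists the PSF files explicitly; config1 and config2 use the
    # glob pattern plus a 'dir' entry and differ in the averaging statistic
    # ('mean' with params_fitted=[0, 2], vs 'median').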

    for config in [config0, config1, config2]:
        piff.meanify(config)
        ## test if found initial average
        average = fitsio.read(os.path.join('output',average_file))
        params0 = make_average(coord=average['COORDS0'][0] / 0.26, gp=False)
        keys = ['hlr', 'g1', 'g2']
        for i,key in enumerate(keys):
            if config == config1 and i == 1:
                np.testing.assert_allclose(np.zeros(len(average['PARAMS0'][0][:,i])),
                                           average['PARAMS0'][0][:,i], rtol=0, atol=0)
            else:
                np.testing.assert_allclose(params0[key], average['PARAMS0'][0][:,i],
                                           rtol=rtol, atol=atol)

    ## gaussian process testing of meanify 
    np.random.seed(68)
    x = np.random.uniform(0, 2048, size=1000)
    y = np.random.uniform(0, 2048, size=1000)
    coord = np.array([x,y]).T
    average = make_average(coord=coord)

    stars = params_to_stars(average, noise=0.0, rng=None)
    stars_training = stars[:900]
    stars_validation = stars[900:]

    fit_hyp = [False, True]

    for fit in fit_hyp:
        gp = piff.GPInterp2pcf(kernel="0.009 * RBF(300.*0.26)",
                               optimize=fit, white_noise=1e-5, average_fits='output/average.fits')
        gp.initialize(stars_training)
        gp.solve(stars_training)
        stars_interp = gp.interpolateList(stars_validation)
        params_interp = np.array([s.fit.params for s in stars_interp])
        params_validation = np.array([s.fit.params for s in stars_validation])
        params_training = np.array([s.fit.params for s in stars_training])
        np.testing.assert_allclose(params_interp, params_validation, rtol=rtol, atol=atol)
Example #56
0
def densvsimpar_pix(type,par,reg=None,ff='targetDR9m42.fits',vmin=None,vmax=None,ebvcut=None,edscut=None,sn2cut=None,fpsfcut=None,gfluxcut=None,rfluxcut=None,gbcut=None,nbin=10,weights=None,titl=''):        
    ft = fitsio.read(sdir+type+ff)
    print(len(ft))
    rl = rall
    if reg:
        wr = rall['PHOTSYS'] == reg
        rl = rl[wr]
        wd = ft['PHOTSYS'] == reg
        ft = ft[wd]
    if gfluxcut:
        wg = ft['FLUX_G']/ft['MW_TRANSMISSION_G'] > gfluxcut
        print(len(ft))      
        ft = ft[wg]
        print(len(ft))
    if rfluxcut:
        wg = ft['FLUX_R']/ft['MW_TRANSMISSION_R'] > rfluxcut
        ft = ft[wg]

    rth,rphi = radec2thphi(rl['RA'],rl['DEC'])
    rpix = hp.ang2pix(nside,rth,rphi,nest=nest)
    dth,dphi = radec2thphi(ft['RA'],ft['DEC'])
    dpix = hp.ang2pix(nside,dth,dphi,nest=nest)
    pixlr = np.zeros(12*nside*nside)
    pixlg = np.zeros(12*nside*nside)
    if par.split('-')[0] == 'VAR' or par.split('-')[0] == 'STDPER':
        pixlp = np.zeros(12*nside*nside)
        pixlv = np.zeros(12*nside*nside)
    if weights is None:
        weights = np.ones(len(pixlr))
    for pix in rpix:
        pixlr[pix] += 1.
    print('randoms done')
    for i in range(0,len(dpix)): 
        pix = dpix[i]
        pixlg[pix] += 1.
        if par.split('-')[0] == 'VAR' or par.split('-')[0] == 'STDPER':
            pixlp[pix] += ft[i][par.split('-')[1]]
            pixlv[pix] += ft[i][par.split('-')[1]]**2.
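            # pixlp accumulates the per-pixel sum and pixlv the sum of
            # squares, so Var = pixlv/N - (pixlp/N)**2 and
            # STDPER = sqrt(Var)/mean further below.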
    
    wp = (pixlr > 0) & (weights*0 == 0)

    parv = fitsio.read(pixfn)
    ebv = parv['EBV']
    sn2tf = 10.**(-0.4*R_G*ebv*2.)*parv['PSFDEPTH_G'] + 10.**(-0.4*R_R*ebv*2.)*parv['PSFDEPTH_R'] + 10.**(-0.4*R_Z*ebv*2.)*parv['PSFDEPTH_Z']
    print(len(parv[wp]))
    if sn2cut:
        wp &= (sn2tf > sn2cut)
        
    if fpsfcut:
        wpsf = ft['MORPHTYPE'] == 'PSF'
        pixlgp = np.zeros(12*nside*nside)
        dpixp = dpix[wpsf]
        for i in range(0,len(dpixp)): 
            pix = dpixp[i]
            pixlgp[pix] += 1.
        fpsf = pixlgp/pixlg
        wp &= (fpsf < fpsfcut)
    if ebvcut:
        wp &= (parv['EBV'] < ebvcut)

    if edscut:
        eds = parv['EBV']/parv['STARDENS']
        wp &= (eds < edscut)

    if gbcut is not None:
        print('applying background cut of '+str(gbcut))
        rf = fitsio.read('/global/u2/r/rongpu/share/desi/sky_residual_dr9_partial/sky_residual_dr9_north_256.fits')
        gb = np.zeros(12*nside*nside)
        for i in range(0,len(rf)):
            px = rf['hp_idx'][i]
            gb[px] = rf['g_blobsky'][i]  
        gb = hp.reorder(gb,r2n=True)    
        wp &= (gb != 0)  
        wp &= (gb < gbcut)

    print(len(parv[wp]))
    if len(par.split('-')) > 1: 
        
        if par.split('-')[0] == 'VAR':
            parv = pixlv[wp]/pixlg[wp]-(pixlp[wp]/pixlg[wp])**2.  
        elif par.split('-')[0] == 'STDPER':
            var = pixlv[wp]/pixlg[wp]-(pixlp[wp]/pixlg[wp])**2. 
            parv = var**.5/(pixlp[wp]/pixlg[wp])
        elif par.split('-')[1] == 'X':
            parv = parv[wp][par.split('-')[0]]*parv[wp][par.split('-')[2]]
        elif par.split('-')[1] == 'DIV':
            parv = parv[wp][par.split('-')[0]]/parv[wp][par.split('-')[2]]
    elif par == 'PSFTOT':
        parv = (parv[wp]['PSFSIZE_G'])*(parv[wp]['PSFSIZE_R'])*(parv[wp]['PSFSIZE_Z'])
    elif par == 'SN2TOT_FLAT':
        ebv = parv[wp]['EBV']
        parv = 10.**(-0.4*R_G*ebv*2.)*parv[wp]['PSFDEPTH_G'] + 10.**(-0.4*R_R*ebv*2.)*parv[wp]['PSFDEPTH_R'] + 10.**(-0.4*R_Z*ebv*2.)*parv[wp]['PSFDEPTH_Z']

    elif par == 'SN2TOT_G':
        ebv = parv[wp]['EBV']
        parv = 10.**(-0.4*R_G*ebv*2.)*parv[wp]['PSFDEPTH_G']

    elif par == 'fracPSF':
        wpsf = ft['MORPHTYPE'] == 'PSF'
        pixlgp = np.zeros(12*nside*nside)
        dpixp = dpix[wpsf]
        for i in range(0,len(dpixp)): 
            pix = dpixp[i]
            pixlgp[pix] += 1.
        parv = pixlgp[wp]/pixlg[wp]
    else:
        parv = parv[wp][par]

    wo = parv*0 == 0
    if vmin is None:
        vmin = np.min(parv[wo])
    if vmax is None:
        vmax = np.max(parv[wo])
    rh,bn = np.histogram(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp])
    dh,db = np.histogram(parv,bins=bn,weights=pixlg[wp]*weights[wp])
    norm = sum(rh)/sum(dh)
    sv = dh/rh*norm
    ep = np.sqrt(dh)/rh*norm
    bc = []
    for i in range(0,len(bn)-1):
        bc.append((bn[i]+bn[i+1])/2.)

    plt.errorbar(bc,sv-1.,ep,fmt='ko')
    plt.hist(parv,bins=nbin,range=(vmin,vmax),weights=pixlr[wp]*0.2*np.ones(len(pixlr[wp]))/np.max(rh))
    plt.ylim(-.3,.3)
    plt.xlabel(par)
    plt.ylabel('Ngal/<Ngal> - 1')
    plt.title(type+' in '+reg + ' footprint, using pixelized map'+titl)
    plt.show()
    wv = (parv>=vmin) & (parv <=vmax)
    frac = sum(pixlr[wp][~wv])/sum(pixlr[wp])
    print('fraction of randoms not included in plot: '+str(frac))
    return bc,sv,ep
Example #57
0
def main():
    ns = parse_args()
        
    if ns.ignore_errors:
        print("Warning: *** Will ignore broken tractor catalog files ***")
        print("         *** Disable -I for final data product.         ***")

    bricks = list_bricks(ns)

    tree, nobj, morecols = read_external(ns.external, ns)

    # get the data type of the match
    brickname, path = bricks[0]
    peek = fitsio.read(path, 1, upper=True)
    matched_catalog = sharedmem.empty(nobj, dtype=peek.dtype)
    matched_catalog['OBJID'] = -1

    matched_distance = sharedmem.empty(nobj, dtype='f4')

    # convert to radian
    tol = ns.tolerance / (60. * 60.)  * (np.pi / 180)
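    # arcsec -> degrees -> radians; assuming radec2pos returns unit vectors,
    # the 3D chord distance from the KD-tree query approximately equals the
    # angular separation in radians for small angles, so it can be compared
    # to tol directly.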

    matched_distance[:] = tol
    nprocessed = np.zeros((), dtype='i8')
    nmatched = np.zeros((), dtype='i8')
    ntotal = np.zeros((), dtype='i8')
    t0 = time()

    with sharedmem.MapReduce(np=ns.numproc) as pool:
        def work(brickname, path):
            try:
                objects = fitsio.read(path, 1, upper=True)
            except:
                if ns.ignore_errors:
                    print ("IO Error on %s" %path)
                    return None, None, None
                else:
                    raise
        
            pos = radec2pos(objects['RA'], objects['DEC'])
            d, i = tree.query(pos, 1)
            assert (objects['OBJID'] != -1).all()
            with pool.critical:
                mask = d < matched_distance[i]
                mask &= objects['BRICK_PRIMARY'] 
                i = i[mask]
                matched_catalog[i] = objects[mask]
                matched_distance[i] = d[mask]
            matched = mask.sum()

            return brickname, matched, len(objects)

        def reduce(brickname, matched, total):
            if brickname is None:
                return
            nprocessed[...] += 1
            nmatched[...] += matched
            ntotal[...] += total
            if ns.verbose:
                if nprocessed % 1000 == 0:
                    print("Processed %d files, %g / second, matched %d / %d objects."
                        % (nprocessed, nprocessed / (time() - t0), nmatched, ntotal)
                        )

        pool.map(work, bricks, star=True, reduce=reduce)

        nrealmatched = (matched_catalog['OBJID'] != -1).sum()
        if ns.verbose:
            print("Processed %d files, %g / second, matched %d / %d objects into %d slots."
                % (nprocessed, nprocessed / (time() - t0), 
                    nmatched, ntotal, 
                    nrealmatched)
                )

        try:
            os.makedirs(os.path.dirname(ns.dest))
        except OSError:
            pass
        header = {}

        header['NMATCHED'] = nrealmatched
        header['NCOLLISION'] = nmatched - nrealmatched
        header['TOL_ARCSEC'] = ns.tolerance

        # Optionally add the new columns
        if len(morecols) > 0:
            newdtype = matched_catalog.dtype.descr
    
            for coldata, col in zip( morecols, ns.copycols ):
                newdtype = newdtype + [(col, coldata.dtype)]
            newdtype = np.dtype(newdtype)
        
            _matched_catalog = np.empty(matched_catalog.shape, dtype=newdtype)
            for field in matched_catalog.dtype.fields:
                _matched_catalog[field] = matched_catalog[field]
            for coldata, col in zip( morecols, ns.copycols ):
                _matched_catalog[col] = coldata
                
            matched_catalog = _matched_catalog.copy()
            del _matched_catalog

        for format in ns.format:
            save_file(ns.dest, matched_catalog, header, format)
Example #58
0
def plotvshp_compmc(type,sys,rng,mcl=None,ws=None,reg=None,ff='targetDR9m42.fits',gdzm=0,ebvm=100,title='',effac=1.,mingd=0,maxgd=1.e6,minpsfg=0,maxpsfg=100,south=True):
    ft = fitsio.read(sdir+type+ff)
    print(len(ft))
    rl = rall
    if reg:
        wr = rall['PHOTSYS'] == reg
        rl = rl[wr]
        wd = ft['PHOTSYS'] == reg
        ft = ft[wd]
    rth,rphi = radec2thphi(rl['RA'],rl['DEC'])
    rpix = hp.ang2pix(nside,rth,rphi,nest=nest)
    dth,dphi = radec2thphi(ft['RA'],ft['DEC'])
    dpix = hp.ang2pix(nside,dth,dphi,nest=nest)
    r1 = np.zeros(12*nside*nside)
    d1= np.zeros(12*nside*nside)
    for pix in rpix:
        r1[pix] += 1.
    print('randoms done')
    for pix in dpix:
        d1[pix] += 1.

    hpq = fitsio.read(pixfn)
    #hpq = parv[par]

    
    w = r1 > 0
    print(len(hpq[w]))
    w &= hpq['GALDEPTH_Z'] > gdzm
    w &= hpq['GALDEPTH_G'] > mingd
    w &= hpq['GALDEPTH_G'] < maxgd
    w &= hpq['EBV'] < ebvm
    w &= hpq['PSFSIZE_G'] > minpsfg
    w &= hpq['PSFSIZE_G'] < maxpsfg
    if ws is not None:
        w &= ws*0 == 0
    if mcl is not None:
        w &= mcl*0 == 0
        w &= mcl > 0
    print(len(hpq[w]))
    #w 
    if sys != 'gdc' and sys != 'rdc' and sys != 'zdc' and sys != 'dg' and sys != 'dr' and sys != 'dz' and sys != 'dgr' and sys != 'drz' and sys != 'dgz':
        sm = hpq[w][sys]
        xlab = sys
    else:
        if sys == 'gdc':
            print('g band depth, extinction corrected')
            sm = hpq[w]['GALDEPTH_G']*10.**(-0.4*R_G*hpq[w]['EBV'])
            xlab = 'g band depth, extinction corrected'
        if sys == 'rdc':
            sm = hpq[w]['GALDEPTH_R']*10.**(-0.4*R_R*hpq[w]['EBV'])
            xlab = 'r band depth, extinction corrected'
        if sys == 'zdc':
            sm = hpq[w]['GALDEPTH_Z']*10.**(-0.4*R_Z*hpq[w]['EBV'])
            xlab = 'z band depth, extinction corrected'
        if sys == 'dg':
            sm = dg[w]
            xlab = 'g band PS1 residual'
        if sys == 'dr':
            sm = dr[w]
            xlab = 'r band PS1 residual'
        if sys == 'dz':
            sm = dz[w]
            xlab = 'z band PS1 residual'
        if sys == 'dgr':
            sm = dg[w]-dr[w]
            xlab = 'g-r band PS1 residual'
        if sys == 'drz':
            sm = dr[w]-dz[w]
            xlab = 'r-z band PS1 residual'
        if sys == 'dgz':
            sm = dg[w]-dz[w]
            xlab = 'g-z band PS1 residual'

    ds = np.ones(len(d1))
    print(len(ds),len(d1),len(w),len(sm))
    hdnoc = np.histogram(sm,weights=d1[w],range=rng)
    #print(hd1)
    hr1 = np.histogram(sm,weights=r1[w],bins=hdnoc[1],range=rng)



    xl = []
    for i in range(0,len(hr1[0])):
        xl.append((hr1[1][i]+hr1[1][i+1])/2.)

    plt.errorbar(xl,hdnoc[0]/hr1[0]/(sum(d1[w])/sum(r1[w])),np.sqrt(hdnoc[0])/hr1[0]/(sum(d1[w])/sum(r1[w])),fmt='ko',label='raw')
    if ws is not None:
        ds = ws
        hd1 = np.histogram(sm,weights=d1[w]*ds[w],bins=hdnoc[1],range=rng)
        plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w])/sum(r1[w])),'b-',label='+ EBV weights')

    #hd1 = np.histogram(sm,weights=d1[w]*ds[w],bins=hdnoc[1],range=rng)
    #plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w])/sum(r1[w])),'k--',label='with stellar density weights')
    if mcl is not None:
        dmcse = mcl**effac
        hd1 = np.histogram(sm,weights=d1[w]/dmcse[w],bins=hdnoc[1],range=rng)
        plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]/dmcse[w])/sum(r1[w])),'r-',label='+MC weights')
    if ws is not None and mcl is not None:
        hd1 = np.histogram(sm,weights=d1[w]*ds[w]/dmcse[w],bins=hdnoc[1],range=rng)
        plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w]/dmcse[w])/sum(r1[w])),'-',color='purple',label='+MC weights + EBV weights')
    #dmcs = mcls**effac
    #hd1 = np.histogram(sm,weights=d1[w]*ds[w]/dmcs[w],bins=hdnoc[1],range=rng)
    #plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w]/dmcs[w])/sum(r1[w])),'b-',label='+MC; sed w ext sigma')
    #dmco = mclo**effac
    #hd1 = np.histogram(sm,weights=d1[w]*ds[w]/dmco[w],bins=hdnoc[1],range=rng)
    #plt.plot(xl,hd1[0]/hr1[0]/(sum(d1[w]*ds[w]/dmco[w])/sum(r1[w])),'-',color='purple',label='old MC')
    
    #plt.title(str(mp)+reg)
    plt.plot(xl,np.ones(len(xl)),'k:',label='null')
    plt.legend()#(['raw','with stellar density weights','+sed ext MC','just sed MC','old MC','null']))
    plt.ylabel('relative density')
    plt.xlabel(xlab)
    plt.ylim(0.7,1.3)
    plt.title(title)
    plt.show()    
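
# A minimal sketch (hypothetical helper, assuming numpy as np and the same
# quantities as above: a masked systematics map sm, galaxy counts d1[w], and
# random counts r1[w]) of the normalized density-vs-systematic estimator
# these plots use, with Poisson error bars sqrt(D)/R:
def relative_density(sysmap, gal_counts, ran_counts, bins=10, rng=None):
    dh, edges = np.histogram(sysmap, bins=bins, range=rng, weights=gal_counts)
    rh, _ = np.histogram(sysmap, bins=edges, weights=ran_counts)
    norm = ran_counts.sum() / gal_counts.sum()
    centers = 0.5 * (edges[1:] + edges[:-1])
    return centers, dh / rh * norm, np.sqrt(dh) / rh * norm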
Example #59
0
def process_field(results, errors, run, camcol, filter, field, params_bright,
                  params_dim, params_removestars):
    """Calls the correct order of actions needed to detect trails per frame.
    Writes  "results.txt" and "errors.txt".

    Order of operations:

      1. Check if .fits file exists

         a. If it doesn't see a compressed .bz2 version exists. If it does
         b. uncompress it to $FITS_DUMP env. var location. If the env. var.
            was not set, decompress to the fits_dump folder in the package

      2. Remove known object from the image by drawing squares over them
      3. Try detecting a bright trail (very fast). If successful write results
         and stop.
      4. if bright detection is not made, try detecting a dim trail. If found
         write results and stop, otherwise just stop.
      5. clean-up (remove unpacked fits, close files, dump silenced errors)

    Parameters
    ----------
    results : file
        a file object, a stream or any such counterpart to which results
        will be written
    errors : file
        a file object, stream, or any such counterpart to which errors will be
        written
    run : int
        run designation
    camcol : int
        camcol designation, 1 to 6
    filter : str
        filter designation, one of ugriz
    params_bright : dict
        dictionary containing execution parameters required by process_bright
    params_dim : dict
        dictionary containing execution parameters required by process_dim
    params_removestars : dict
        dictionary containing execution parameters required by remove_stars
    """
    removefits = False
    try:
        origfitspath = files.filename('frame',
                                      run=run,
                                      camcol=camcol,
                                      field=field,
                                      filter=filter)

        # downright sadness that fitsio doesn't support bz2-compressed fits:
        # if there is no fits, but only fits.bz2, you have to open, decompress,
        # save, and reopen with fitsio. We also don't want to delete existing
        # unpacked fits files, so we set the removefits flag to True only when
        # *we* created a file by unpacking
        if not os.path.exists(origfitspath):

            bzpath = origfitspath + ".bz2"
            if not os.path.exists(bzpath):
                errmsg = ("File {0} or its bz2 compressed version not found. "
                          "Are you sure they exist?")
                raise FileNotFoundError(errmsg.format(origfitspath))

            with open(bzpath, "rb") as compressedfits:
                fitsdata = bz2.decompress(compressedfits.read())

                # see if user uncompressed fits dumping location is set
                try:
                    fitsdmp = os.environ["FITS_DUMP"]
                except KeyError:
                    # if not default to the fits_dump dir in the package
                    modloc = os.path.split(__file__)[0]
                    fitsdmp = os.path.join(modloc, "fits_dump/")

                fitspath = os.path.join(fitsdmp,
                                        os.path.split(origfitspath)[-1])

                # save the uncompressed fits
                with open(fitspath, "wb") as decompressed:
                    decompressed.write(fitsdata)

                # setting the flag here, after we have certainly written and
                # closed the file successfully, helps escape any errors later
                # on in case we try to remove a nonexistent file
                removefits = True
        else:
            fitspath = origfitspath

        img = fitsio.read(fitspath)
        h = fitsio.read_header(fitspath)
        printit = (f"{run} {camcol} {filter} {field} {h['TAI']} {h['CRPIX1']} "
                   "{h['CRPIX2']} {h['CRVAL1']} {h['CRVAL2']} {h['CD1_1']} "
                   "{h['CD1_2']} {h['CD2_1']} {h['CD2_2']} ")

        img = remove_stars(img, run, camcol, filter, field,
                           **params_removestars)

        # WARNING mirror the image vertically
        # it seems CV2 and FITSIO set different pix coords
        img = cv2.flip(img, 0)
        detection, res = process_field_bright(img, **params_bright)
        if detection:
            results.write(printit +
                          f"{res['x1']} {res['y1']} {res['x2']} {res['y2']}\n")
        else:
            detection, res = process_field_dim(img, **params_dim)
            if detection:
                results.write(
                    printit +
                    f"{res['x1']} {res['y1']} {res['x2']} {res['y2']}\n")

    except Exception as e:
        if params_bright["debug"] or params_dim["debug"]:
            traceback.print_exc(limit=3)
        errors.write(f"{run} {camcol} {filter} {field}\n")
        traceback.print_exc(limit=3, file=errors)
        errors.write(str(e) + "\n\n")
        pass

    finally:
        if removefits:
            os.remove(fitspath)
data_path = glob.glob(os.path.join(o.input_path, 'lsst_e_*.fits.gz'))
parent_path = os.path.dirname(o.input_path)
instcat_path = glob.glob(os.path.join(parent_path, 'instCat/phosim*.txt'))[0]
phosim_pars = pd.read_table(instcat_path, index_col=0, header=None, sep=' ').T
seeing = float(phosim_pars['seeing'].values[0])
bandpass = bandpass_all[int(phosim_pars['filter'].values[0])]
psf_fwhm = []
mean_bkg = []
median_bkg = []
bkg_noise = []

for i, dp in enumerate(data_path):
    if i % 10 == 0:
        print('Analyzed %d of %d images' % (i, len(data_path)))
    data, h = fitsio.read(dp, ext=0, header=True)
    psf_fwhm.append(get_total_seeing(h))
    aux1, aux2, aux3 = compute_bkg(data)
    mean_bkg.append(aux1)
    median_bkg.append(aux2)
    bkg_noise.append(aux3)

psf_fwhm = np.array(psf_fwhm)
mean_bkg = np.array(mean_bkg)
median_bkg = np.array(median_bkg)
bkg_noise = np.array(bkg_noise)

if imsim_installed:
    mean_bkg_imsim, median_bkg_imsim, bkg_noise_imsim = compare_with_imsim(
        phosim_pars)
else: