Code example #1
import os
import socket
import time

import numpy as np
from astropy.io import fits
from astropy.table import Table
from dlnpyutils import utils as dln   # D. Nidever's utility module


def make_meas_missing(exposure):
    """ Make meas_missing.fits files for exposures."""

    t00 = time.time()
    hostname = socket.gethostname()
    host = hostname.split('.')[0]

    version = 'v3'

    # Load the exposures table
    print('Loading exposure table')
    expstr = fits.getdata(
        '/net/dl2/dnidever/nsc/instcal/' + version +
        '/lists/nsc_v3_exposure_table.fits.gz', 1)

    # Make sure it's a list
    if isinstance(exposure, str): exposure = [exposure]

    # Match exposures to exposure catalog
    ind1, ind2 = dln.match(expstr['EXPOSURE'], exposure)
    nmatch = len(ind1)
    print(str(nmatch) + ' exposures found in exposure table')

    # Loop over files
    for i in range(nmatch):
        exp1 = expstr['EXPOSURE'][ind1[i]]
        instcode = expstr['INSTRUMENT'][ind1[i]]
        dateobs = expstr['DATEOBS'][ind1[i]]
        night = dateobs[0:4] + dateobs[5:7] + dateobs[8:10]
        measfile = '/net/dl2/dnidever/nsc/instcal/v3/' + instcode + '/' + night + '/' + exp1 + '/' + exp1 + '_meas.fits.gz'
        if os.path.exists(measfile):
            meas = fits.getdata(measfile, 1)
            objectid = np.char.array(meas['OBJECTID']).strip()
            miss, = np.where(objectid == '')
            if len(miss) > 0:
                print(
                    str(i + 1) + ' ' + exp1 + ' ' + str(len(miss)) +
                    ' missing')
                missfile = '/net/dl2/dnidever/nsc/instcal/v3/' + instcode + '/' + night + '/' + exp1 + '/' + exp1 + '_meas_missing.fits'
                print('  Writing missing catalog to ' + missfile)
                Table(meas[miss]).write(missfile, overwrite=True)
            else:
                print(str(i + 1) + ' ' + exp1 + ' none missing')
        else:
            print(measfile + ' NOT FOUND')
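
A minimal usage sketch: pass one or more exposure base names (the names below are hypothetical) and the function writes an EXPOSURE_meas_missing.fits catalog for any exposure whose measurements lack OBJECTIDs:

# hypothetical exposure base names; the hard-wired /net/dl2 paths must exist
make_meas_missing(['c4d_170331_234718_ooi_g_v1',
                   'c4d_170331_235149_ooi_g_v1'])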
Code example #2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from dl import queryClient as qc       # NOIRLab Data Lab query client
from dlnpyutils import utils as dln    # D. Nidever's utility module


def make_4panelplot(obj, outdir):
    matplotlib.use('Agg')
    #params = {'tex.usetex': True}
    #plt.rcParams.update(params)
    #plt.rc(usetex = True)

    idv = obj['id']

    #gather all data used
    dataselect = "select mjd,ra,dec,mag_auto,raerr,decerr,filter,fwhm,exposure from nsc_dr2.meas where objectid='" + idv + "'"
    meas = qc.query(sql=dataselect, fmt='table', profile='db01')
    #obj = qc.query(sql="select id,pmra,pmdec from nsc_dr2.object where id='"+ idv +"'",fmt='table',profile='db01')
    #obj = qc.query(sql="select id,pmra,pmdec from nsc_dr2.object where id='"+ idv +"'",fmt='table')

    # Make cut on FWHM
    # maybe only use values for 0.5*fwhm_chip to 1.5*fwhm_chip
    sql = "select chip.* from nsc_dr2.chip as chip join nsc_dr2.meas as meas on chip.exposure=meas.exposure and chip.ccdnum=meas.ccdnum"
    sql += " where meas.objectid='" + idv + "'"
    chip = qc.query(sql=sql, fmt='table')
    ind3, ind4 = dln.match(chip['exposure'], meas['exposure'])
    si = np.argsort(ind4)  # sort by input meas catalog
    ind3 = ind3[si]
    ind4 = ind4[si]
    chip = chip[ind3]
    meas = meas[ind4]
    gdfwhm, = np.where((meas['fwhm'] > 0.2 * chip['fwhm'])
                       & (meas['fwhm'] < 2.0 * chip['fwhm']))
    if len(gdfwhm) == 0:
        print('All measurements have bad FWHM values')
        return
    if len(gdfwhm) < len(meas):
        print('Removing ' + str(len(meas) - len(gdfwhm)) +
              ' measurements with bad FWHM values')
        meas = meas[gdfwhm]

    plt.subplots(2, 2, figsize=(12, 8))
    plt.subplots_adjust(hspace=.4, wspace=.4)
    plt.suptitle(idv)
    meas["mjd"] -= min(meas["mjd"])
    mjd = (meas["mjd"])
    cenra = np.mean(meas["ra"])
    cendec = np.mean(meas["dec"])
    mag = meas["mag_auto"]
    filters = meas["filter"]
    #colors = ["b", "g", "r", "c", "y"]
    colordict = {
        'u': 'c',
        'g': 'b',
        'r': 'g',
        'i': 'y',
        'z': 'orange',
        'Y': 'r',
        'VR': 'purple'
    }
    ra = meas["ra"]
    raerr = meas['raerr']
    dra = ra - cenra
    dra *= 3600 * np.cos(np.deg2rad(cendec))
    dec = meas["dec"]
    decerr = meas['decerr']
    ddec = dec - cendec
    ddec *= 3600
    t = meas["mjd"] - np.mean(meas["mjd"])
    pmra = obj["pmra"] / 1000 / 365.2425  # mas/yr -> as/day
    pmdec = obj["pmdec"] / 1000 / 365.2425

    size = 15

    # filter some
    goodind = np.where(np.logical_and(abs(ddec) < 500, abs(dra) < 500))
    ddec = ddec[goodind]
    dra = dra[goodind]
    raerr = raerr[goodind]
    decerr = decerr[goodind]
    mjd = mjd[goodind]
    filters = filters[goodind]
    mag = mag[goodind]
    meandra = np.mean(dra)
    meanddec = np.mean(ddec)

    # Unique filters
    # put them in this order [u,g,r,i,z,Y,VR]
    ufilter = []
    for filt in ['u', 'g', 'r', 'i', 'z', 'Y', 'VR']:
        if filt in filters:
            ufilter.append(filt)

    # ra dec plot
    plt.subplot(2, 2, 1)
    plt.errorbar(dra,
                 ddec,
                 raerr,
                 decerr,
                 fmt='none',
                 ecolor='lightgray',
                 elinewidth=1,
                 capsize=0,
                 alpha=0.5,
                 zorder=0)
    plt.scatter(dra, ddec, c=mjd, s=size, zorder=1)
    diffra = max(dra) - min(dra)
    diffdec = max(ddec) - min(ddec)
    plt.xlim(min(dra) - diffra / 4, max(dra) + diffra / 4)
    plt.ylim(min(ddec) - diffdec / 4, max(ddec) + diffdec / 4)
    m, b = np.polyfit(dra, ddec, 1)
    plt.plot(dra, m * dra + b, c="k")
    plt.colorbar(label=r'$\Delta$ MJD')
    plt.xlabel(r"$\Delta$ RA (arcsec)")
    plt.ylabel(r"$\Delta$ DEC (arcsec)")

    # ra mjd plot
    plt.subplot(2, 2, 2)
    plt.errorbar(mjd,
                 dra,
                 yerr=raerr,
                 fmt='none',
                 ecolor='lightgray',
                 elinewidth=1,
                 capsize=0,
                 alpha=0.5,
                 zorder=0)
    count = 0
    for fil in ufilter:
        filind = np.where(filters == fil)
        #plt.scatter(mjd[filind], dra[filind], c = colors[count], label = fil, s = size, zorder=1)
        plt.scatter(mjd[filind],
                    dra[filind],
                    c=colordict[fil],
                    label=fil,
                    s=size,
                    zorder=1)
        count += 1
    plt.legend()
    diffmjd = max(mjd) - min(mjd)
    plt.xlim(min(mjd) - diffmjd / 4, max(mjd) + diffmjd / 4)
    plt.ylim(min(dra) - diffra / 4, max(dra) + diffra / 4)
    m, b = np.polyfit(mjd, dra, 1)
    # plot the catalog proper motion (pmra) as the slope, with the fitted intercept
    plt.plot(mjd, mjd * pmra + b, c="k")
    plt.xlabel(r'$\Delta$ MJD (days)')
    plt.ylabel(r'$\Delta$ RA (arcsec)')

    # dec mjd plot
    plt.subplot(2, 2, 4)
    plt.errorbar(mjd,
                 ddec,
                 yerr=decerr,
                 fmt='none',
                 ecolor='lightgray',
                 elinewidth=1,
                 capsize=0,
                 alpha=0.5,
                 zorder=0)
    count = 0
    for fil in ufilter:
        filind = np.where(filters == fil)
        plt.scatter(mjd[filind],
                    ddec[filind],
                    c=colordict[fil],
                    label=fil,
                    s=size,
                    zorder=1)
        count += 1
    plt.legend()
    plt.xlim(min(mjd) - diffmjd / 4, max(mjd) + diffmjd / 4)
    plt.ylim(min(ddec) - diffdec / 4, max(ddec) + diffdec / 4)
    m, b = np.polyfit(mjd, ddec, 1)
    # plot the catalog proper motion (pmdec) as the slope, with the fitted intercept
    plt.plot(mjd, mjd * pmdec + b, c="k")
    plt.xlabel(r'$\Delta$ MJD (days)')
    plt.ylabel(r'$\Delta$ DEC (arcsec)')

    #magtime
    plt.subplot(2, 2, 3)
    count = 0
    for fil in ufilter:
        filind = np.where(filters == fil)
        plt.scatter(mjd[filind],
                    mag[filind],
                    c=colordict[fil],
                    label=fil,
                    s=size)
        count += 1
    plt.legend()
    diffmag = max(mag) - min(mag)
    plt.xlim(min(mjd) - diffmjd / 4, max(mjd) + diffmjd / 4)
    plt.ylim(min(mag) - diffmag / 4, max(mag) + diffmag / 4)
    plt.xlabel(r'$\Delta$ MJD (days)')
    plt.ylabel("Magnitude")

    outfile = outdir + idv + '.png'
    #outfile = outdir+idv+'_4panel.png'
    print('Saving figure to ' + outfile)
    plt.savefig(outfile, bbox_inches='tight')
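
A usage sketch, assuming Data Lab query access is configured; the object ID is hypothetical. make_4panelplot() needs a row with at least id, pmra, and pmdec:

# hypothetical NSC DR2 object id
res = qc.query(sql="select id,pmra,pmdec from nsc_dr2.object where id='185128_2439'",
               fmt='table')
make_4panelplot(res[0], '/tmp/plots/')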
Code example #3
import logging
import os
import socket
import subprocess
import sys
import time
from glob import glob

import numpy as np
from astropy.io import fits
from astropy.table import Table
from dlnpyutils import utils as dln   # D. Nidever's utility module


def exposure_update(exposure, redo=False):
    """ Update the measurement table using the broken-up measid/objectid lists."""

    t00 = time.time()
    hostname = socket.gethostname()
    host = hostname.split('.')[0]

    iddir = '/data0/dnidever/nsc/instcal/v3/idstr/'
    version = 'v3'

    # Load the exposures table
    print('Loading exposure table')
    expcat = fits.getdata(
        '/net/dl2/dnidever/nsc/instcal/' + version +
        '/lists/nsc_v3_exposure_table.fits.gz', 1)

    # Make sure it's a list
    if isinstance(exposure, str): exposure = [exposure]

    # Match exposures to exposure catalog
    eind1, eind2 = dln.match(expcat['EXPOSURE'], exposure)
    nmatch = len(eind1)
    print(
        str(nmatch) + ' matches for ' + str(len(exposure)) +
        ' input exposures')

    if len(eind1) == 0:
        print('No exposures matched to exposure table')
        sys.exit()

    print('Updating measid for ' + str(len(exposure)) + ' exposures')

    # Loop over files
    for i in range(len(exposure)):
        t0 = time.time()
        exp = expcat['EXPOSURE'][eind1[i]]
        print(str(i + 1) + ' ' + exp)

        instcode = expcat['INSTRUMENT'][eind1[i]]
        dateobs = expcat['DATEOBS'][eind1[i]]
        night = dateobs[0:4] + dateobs[5:7] + dateobs[8:10]
        expdir = '/net/dl2/dnidever/nsc/instcal/' + version + '/' + instcode + '/' + night + '/' + exp
        edir = iddir + instcode + '/' + night + '/' + exp + '/'  # local directory for ID files
        #outdir = edir
        outdir = expdir

        # Check that the directory exists
        if os.path.exists(expdir) is False:
            print(expdir + ' NOT FOUND')
            continue

        # Check output file
        measfile = outdir + '/' + exp + '_meas.fits'
        if (os.path.exists(measfile + '.gz')) & (redo is False):
            print(measfile + '.gz already exists.  Skipping')
            continue

        # Log file
        #------------------
        # format is EXPOSURE_measure_update.DATETIME.log (MMDDYYHHMMSS)
        logtime = time.strftime('%m%d%y%H%M%S', time.localtime())
        logfile = outdir + '/' + exp + '_measure_update.' + logtime + '.log'
        if os.path.exists(logfile): os.remove(logfile)

        # Set up logging to screen and logfile
        logFormatter = logging.Formatter(
            "%(asctime)s [%(levelname)-5.5s]  %(message)s")
        rootLogger = logging.getLogger()   # get the logger before clearing handlers
        if rootLogger.hasHandlers():
            rootLogger.handlers = []  # remove all handlers
        fileHandler = logging.FileHandler(logfile)
        fileHandler.setFormatter(logFormatter)
        rootLogger.addHandler(fileHandler)
        consoleHandler = logging.StreamHandler()
        consoleHandler.setFormatter(logFormatter)
        rootLogger.addHandler(consoleHandler)
        rootLogger.setLevel(logging.NOTSET)

        rootLogger.info(
            'Adding objectID for measurement catalogs for exposure = ' + exp)
        rootLogger.info("expdir = " + expdir)
        rootLogger.info("host = " + host)
        rootLogger.info(" ")

        #  Load the exposure and metadata files
        metafile = expdir + '/' + exp + '_meta.fits'
        meta = Table.read(metafile, 1)
        nmeta = len(meta)
        chstr = Table.read(metafile, 2)
        rootLogger.info('KLUDGE!!!  Changing /dl1 filenames to /dl2 filenames')
        cols = ['EXPDIR', 'FILENAME', 'MEASFILE']
        for c in cols:
            f = np.char.array(chstr[c]).decode()
            f = np.char.array(f).replace('/dl1/users/dnidever/',
                                         '/dl2/dnidever/')
            chstr[c] = f
        nchips = len(chstr)

        # Get "good" chips, astrometrically calibrated
        astokay = np.zeros(nchips, bool)
        for k in range(nchips):
            # Check that this chip was astrometrically calibrated
            #   and falls in to HEALPix region
            # Also check for issues with my astrometric corrections
            if (chstr['NGAIAMATCH'][k]
                    == 0) | (np.max(np.abs(chstr['RACOEF'][k])) > 1) | (np.max(
                        np.abs(chstr['DECCOEF'][k])) > 1):
                astokay[k] = False
            else:
                astokay[k] = True
        #gdch,ngdch,bdch,nbdch = dln.where(chstr['NGAIAMATCH']>0,comp=True)
        gdch, ngdch, bdch, nbdch = dln.where(astokay == True, comp=True)
        if nbdch > 0:
            rootLogger.info(
                str(nbdch) + ' chips were not astrometrically calibrated')

        measdtype = np.dtype([('MEASID', 'S50'), ('OBJECTID', 'S50'),
                              ('EXPOSURE', 'S50'), ('CCDNUM', '>i2'),
                              ('FILTER', 'S2'), ('MJD', '>f8'), ('X', '>f4'),
                              ('Y', '>f4'), ('RA', '>f8'), ('RAERR', '>f4'),
                              ('DEC', '>f8'), ('DECERR', '>f4'),
                              ('MAG_AUTO', '>f4'), ('MAGERR_AUTO', '>f4'),
                              ('MAG_APER1', '>f4'), ('MAGERR_APER1', '>f4'),
                              ('MAG_APER2', '>f4'), ('MAGERR_APER2', '>f4'),
                              ('MAG_APER4', '>f4'), ('MAGERR_APER4', '>f4'),
                              ('MAG_APER8', '>f4'), ('MAGERR_APER8', '>f4'),
                              ('KRON_RADIUS', '>f4'), ('ASEMI', '>f4'),
                              ('ASEMIERR', '>f4'), ('BSEMI', '>f4'),
                              ('BSEMIERR', '>f4'), ('THETA', '>f4'),
                              ('THETAERR', '>f4'), ('FWHM', '>f4'),
                              ('FLAGS', '>i2'), ('CLASS_STAR', '>f4')])

        # Load and concatenate the meas catalogs
        # keep track of where each chip catalog starts
        chstr['MEAS_INDEX'] = -1
        count = 0
        meas = Table(
            data=np.zeros(int(np.sum(chstr['NMEAS'][gdch])), dtype=measdtype))
        rootLogger.info(
            'Loading and concatenating the chip measurement catalogs')
        for j in range(ngdch):
            jch = gdch[j]
            chfile = chstr['MEASFILE'][jch].strip()
            if chfile == '': continue
            #print(str(j+1)+' Loading '+chfile)
            meas1 = Table.read(chfile, 1)  # load chip meas catalog
            nmeas1 = len(meas1)
            meas[count:count + nmeas1] = meas1
            chstr['MEAS_INDEX'][jch] = count
            count += nmeas1
        measid = np.char.array(meas['MEASID']).strip().decode()
        nmeas = len(meas)
        rootLogger.info(str(nmeas) + ' measurements')

        # Look for the id files
        allfiles = glob(edir + exp + '__*.npy')
        # check for duplicates, single and split into high-res healpix idstr files
        #  always use the split ones
        base = [os.path.splitext(os.path.basename(f))[0] for f in allfiles]
        hfile = [f.split('__')[-1] for f in base]
        hh = [f.split('_')[0] for f in hfile]  # the healpix portion
        hindex = dln.create_index(hh)
        files = []
        for j in range(len(hindex['value'])):
            hpix1 = hindex['value'][j]
            hind = hindex['index'][hindex['lo'][j]:hindex['hi'][j] + 1]
            files1 = np.array(allfiles)[hind]
            # duplicates, use the split/hires ones
            if hindex['num'][j] > 1:
                gd = dln.grep(files1, str(hpix1) + '_n', index=True)
                if len(gd) == 0:
                    raise ValueError(
                        'Something is wrong with the idstr files, duplicates')
                files += list(files1[gd])
            else:
                files += list(files1)
        nfiles = len(files)
        rootLogger.info(str(nfiles) + ' ID files to load')

        # Loop over ID files and load them up
        # np.str_ replaces the np.str alias removed in NumPy 1.24
        df = np.dtype([('measid', np.str_, 50), ('objectid', np.str_, 50)])
        idcat = np.zeros(10000, dtype=df)
        count = 0
        for k in range(nfiles):
            idcat1 = np.load(files[k])
            nidcat1 = len(idcat1)
            # Add more elements
            if count + nidcat1 > len(idcat):
                idcat = dln.add_elements(idcat, np.maximum(100000, nidcat1))
            # Stuff in the data
            idcat[count:count + nidcat1] = idcat1
            count += nidcat1
        # Trim extra elements
        if len(idcat) > count: idcat = idcat[0:count]
        rootLogger.info('IDs for ' + str(len(idcat)) + ' measurements')

        # Match up with measid
        idcat_measid = np.char.array(idcat['measid']).strip()
        if isinstance(idcat_measid[0], bytes):
            idcat_measid = idcat_measid.decode()
        ind1, ind2 = dln.match(idcat_measid, measid)
        nmatch = len(ind1)
        rootLogger.info('Matches for ' + str(nmatch) + ' measurements')
        if nmatch > 0:
            meas['OBJECTID'][ind2] = idcat['objectid'][ind1]

        if (len(ind1) > len(measid)) | (len(idcat) > len(meas)):
            rootLogger.info('There are ' + str(len(idcat) - len(meas)) +
                            ' duplicates!!')

        # Checking for missing objectid
        ind, nind = dln.where(
            np.char.array(meas['OBJECTID']).strip().decode() == '')
        # There can be missing/orphaned measurements at healpix boundaries in crowded
        # regions when the DBSCAN eps is different.  But there should be very few of these.
        # At this point, let's allow this to pass
        if nind > 0:
            rootLogger.info('WARNING: ' + str(nind) +
                            ' measurements are missing OBJECTIDs')
        #if ((nmeas>=20000) & (nind>20)) | ((nmeas<20000) & (nind>3)):
        #    rootLogger.info('More missing OBJECTIDs than currently allowed.')
        #    hpix = hp.ang2pix(128,meas['RA'][ind],meas['DEC'][ind],lonlat=True)
        #    hindex = dln.create_index(hpix)
        #    out = []
        #    for i in range(len(hindex['value'])):
        #        out.append(str(hindex['value'][i])+' ('+str(hindex['num'][i])+')')
        #    rootLogger.info('healpix of missing measurements: '+', '.join(out))
        #    outtxt = [str(nind)+' missing IDs','healpix of missing measurements: '+', '.join(out)]
        #    dln.writelines(outdir+'/'+exp+'_meas.ERROR',outtxt)
        #    continue

        # Output the updated measurement catalog
        #  Writing a single FITS file is much faster than many small ones
        # could put it in /data0 but db01 won't be able to access that
        rootLogger.info('Writing final measurement catalog to ' + measfile)
        meas.write(measfile, overwrite=True)
        if os.path.exists(measfile + '.gz'): os.remove(measfile + '.gz')
        ret = subprocess.call(['gzip', measfile])  # compress final catalog

        # Update the meta file as well, need to update the /dl2 filenames
        metafile = outdir + '/' + exp + '_meta.fits'
        rootLogger.info('Updating meta file ' + metafile)
        meta.write(metafile, overwrite=True)
        hdulist = fits.open(metafile)
        hdu = fits.table_to_hdu(chstr)
        hdulist.append(hdu)
        hdulist.writeto(metafile, overwrite=True)
        hdulist.close()

        # Create a file saying that the files were updated okay.
        #dln.writelines(expdir+'/'+exp+'_meas.updated','')
        dln.writelines(outdir + '/' + exp + '_meas.updated', '')
        # Remove meas.ERROR, if it exists
        if os.path.exists(outdir + '/' + exp + '_meas.ERROR'):
            os.remove(outdir + '/' + exp + '_meas.ERROR')

        rootLogger.info('dt = ' + str(time.time() - t0) + ' sec.')

    print('dt = %6.1f sec.' % (time.time() - t00))
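
exposure_update() consumes the per-exposure .npy ID lists written by breakup_idstr() (code example #5) and rewrites each EXPOSURE_meas.fits.gz with its OBJECTIDs filled in. A usage sketch with a hypothetical exposure name:

exposure_update(['c4d_170331_234718_ooi_g_v1'], redo=True)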
Code example #4
import os
import socket
import sqlite3
import subprocess
import time
from glob import glob

import healpy as hp
import numpy as np
from astropy.io import fits
from astropy.table import Table
from dlnpyutils import utils as dln   # D. Nidever's utility module
# querydb() and readidstrdb() are assumed to be project-local helpers for
# reading the nsc_meta.db and PIX_idstr.db SQLite files.


def measurement_info(pix):
    """ Write out measid/objectid lists for the exposures in one HEALPix pixel."""
    t0 = time.time()
    hostname = socket.gethostname()
    host = hostname.split('.')[0]

    # Get version number from exposure directory
    #lo = expdir.find('nsc/instcal/')
    #dum = expdir[lo+12:]
    #version = dum[0:dum.find('/')]
    version = 'v3'
    cmbdir = '/net/dl2/dnidever/nsc/instcal/' + version + '/'
    #edir = '/net/dl1/users/dnidever/nsc/instcal/'+version+'/'
    nside = 128  # combine-file HEALPix resolution, needed by hp.ang2pix() below

    #expstr = fits.getdata('/net/dl2/dnidever/nsc/instcal/'+version+'/lists/nsc_'+version+'_exposures.fits.gz',1)
    # too many columns, just need full path and base
    metadb = '/net/dl2/dnidever/nsc/instcal/' + version + '/lists/nsc_meta.db'
    data = querydb(metadb, 'exposure', 'expdir')
    data = [a[0] for a in data]
    expdir = np.char.array(data)
    expdir = expdir.rstrip('/')
    base = [os.path.basename(e) for e in expdir]
    base = np.char.array(base)

    # If we put the output files in a PIX_idstr/ subdirectory then I wouldn't need to
    # know all of this exposure path information

    dbfile = cmbdir + 'combine/' + str(
        int(pix) // 1000) + '/' + str(pix) + '_idstr.db'
    print(dbfile)

    # Deal with sub-pixels!!

    # Get the row count
    db = sqlite3.connect(dbfile,
                         detect_types=sqlite3.PARSE_DECLTYPES
                         | sqlite3.PARSE_COLNAMES)
    cur = db.cursor()
    cur.execute('select count(rowid) from idstr')
    data = cur.fetchall()
    db.close()
    nrows = data[0][0]
    print(str(nrows) + ' rows')

    print('Loading the data')
    idstr = readidstrdb(dbfile)
    # Need to do this in chunks if there are too many rows

    # Get unique exposures
    exposure = np.char.array(idstr['exposure'])
    expindex = dln.create_index(exposure)
    nexp = len(expindex['value'])
    print(str(nexp) + ' exposures')

    # Get absolute paths
    ind1, ind2 = dln.match(base, expindex['value'])
    expdirs = np.zeros(nexp, dtype='U200')
    expdirs[ind2] = expdir[ind1]

    # Convert /dl1 to /dl2
    expdirs = np.char.array(expdirs).replace('/dl1/users/dnidever/',
                                             '/dl2/dnidever/')

    # Loop through the exposures and write out their information
    for e in range(nexp):
        exposure1 = expindex['value'][e]
        eind = expindex['index'][expindex['lo'][e]:expindex['hi'][e] + 1]
        idstr1 = idstr[eind]
        nidstr1 = len(idstr1)
        # Just need measid,objectid, and only the width that we need
        mlen = np.max([len(m) for m in idstr1['measid']])
        olen = np.max([len(o) for o in idstr1['objectid']])
        dt = np.dtype([('measid', np.str_, mlen), ('objectid', np.str_, olen)])
        new = np.zeros(nidstr1, dtype=dt)
        new['measid'] = idstr1['measid']
        new['objectid'] = idstr1['objectid']
        print(str(e + 1) + ' ' + exposure1 + ' ' + str(nidstr1))

        # Put these files in expdir/idstr/ subdirectory!!

        # Write it out
        outfile = expdirs[e] + '/' + exposure1 + '_objectid_list.fits'
        #outfile = expdirs[e]+'/'+exposure1+'_objectid_list.npy'
        print('  Writing ' + outfile)
        #if os.path.exists(outfile): os.remove(outfile)
        #np.save(outfile,new)   # not any faster
        Table(new).write(outfile, overwrite=True)

    print('dt = ' + str(time.time() - t0) + ' sec.')

    import pdb
    pdb.set_trace()
    # NOTE: the code below this breakpoint looks unfinished/leftover:
    # expdir and base are arrays here and rootLogger is never configured
    # (the logging setup is commented out), so it will not run as-is.

    # Check if output file already exists
    #base = os.path.basename(expdir)

    ## Log file
    ##------------------
    ## format is nsc_combine_main.DATETIME.log
    #ltime = time.localtime()
    ## time.struct_time(tm_year=2019, tm_mon=7, tm_mday=22, tm_hour=0, tm_min=30, tm_sec=20, tm_wday=0, tm_yday=203, tm_isdst=1)
    #smonth = str(ltime[1])
    #if ltime[1]<10: smonth = '0'+smonth
    #sday = str(ltime[2])
    #if ltime[2]<10: sday = '0'+sday
    #syear = str(ltime[0])[2:]
    #shour = str(ltime[3])
    #if ltime[3]<10: shour='0'+shour
    #sminute = str(ltime[4])
    #if ltime[4]<10: sminute='0'+sminute
    #ssecond = str(int(ltime[5]))
    #if ltime[5]<10: ssecond='0'+ssecond
    #logtime = smonth+sday+syear+shour+sminute+ssecond
    #logfile = expdir+'/'+base+'_measure_update.'+logtime+'.log'
    #if os.path.exists(logfile): os.remove(logfile)

    ## Set up logging to screen and logfile
    #logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s]  %(message)s")
    #rootLogger = logging.getLogger()
    #fileHandler = logging.FileHandler(logfile)
    #fileHandler.setFormatter(logFormatter)
    #rootLogger.addHandler(fileHandler)
    #consoleHandler = logging.StreamHandler()
    #consoleHandler.setFormatter(logFormatter)
    #rootLogger.addHandler(consoleHandler)
    #rootLogger.setLevel(logging.NOTSET)

    #rootLogger.info('Adding objectID for measurement catalogs for exposure = '+base)
    #rootLogger.info("expdir = "+expdir)
    #rootLogger.info("host = "+host)
    #rootLogger.info(" ")

    #  Load the exposure and metadata files
    metafile = expdir + '/' + base + '_meta.fits'
    meta = Table.read(metafile, 1)
    nmeta = len(meta)
    chstr = Table.read(metafile, 2)
    rootLogger.info('KLUDGE!!!  Changing /dl1 filenames to /dl2 filenames')
    cols = ['EXPDIR', 'FILENAME', 'MEASFILE']
    for c in cols:
        f = np.char.array(chstr[c]).decode()
        f = np.char.array(f).replace('/dl1/users/dnidever/', '/dl2/dnidever/')
        chstr[c] = f
    nchips = len(chstr)

    measdtype = np.dtype([('MEASID', 'S50'), ('OBJECTID', 'S50'),
                          ('EXPOSURE', 'S50'), ('CCDNUM', '>i2'),
                          ('FILTER', 'S2'), ('MJD', '>f8'), ('X', '>f4'),
                          ('Y', '>f4'), ('RA', '>f8'), ('RAERR', '>f4'),
                          ('DEC', '>f8'), ('DECERR', '>f4'),
                          ('MAG_AUTO', '>f4'), ('MAGERR_AUTO', '>f4'),
                          ('MAG_APER1', '>f4'), ('MAGERR_APER1', '>f4'),
                          ('MAG_APER2', '>f4'), ('MAGERR_APER2', '>f4'),
                          ('MAG_APER4', '>f4'), ('MAGERR_APER4', '>f4'),
                          ('MAG_APER8', '>f4'), ('MAGERR_APER8', '>f4'),
                          ('KRON_RADIUS', '>f4'), ('ASEMI', '>f4'),
                          ('ASEMIERR', '>f4'), ('BSEMI', '>f4'),
                          ('BSEMIERR', '>f4'), ('THETA', '>f4'),
                          ('THETAERR', '>f4'), ('FWHM', '>f4'),
                          ('FLAGS', '>i2'), ('CLASS_STAR', '>f4')])

    # Load and concatenate the meas catalogs
    chstr['MEAS_INDEX'] = 0  # keep track of where each chip catalog starts
    count = 0
    meas = Table(data=np.zeros(int(np.sum(chstr['NMEAS'])), dtype=measdtype))
    rootLogger.info('Loading and concatenating the chip measurement catalogs')
    for i in range(nchips):
        meas1 = Table.read(chstr['MEASFILE'][i].strip(),
                           1)  # load chip meas catalog
        nmeas1 = len(meas1)
        meas[count:count + nmeas1] = meas1
        chstr['MEAS_INDEX'][i] = count
        count += nmeas1
    measid = np.char.array(meas['MEASID']).strip().decode()
    nmeas = len(meas)
    rootLogger.info(str(nmeas) + ' measurements')

    # Get the OBJECTID from the combined healpix file IDSTR structure
    #  remove any sources that weren't used

    # Figure out which healpix this figure overlaps
    pix = hp.ang2pix(nside, meas['RA'], meas['DEC'], lonlat=True)
    upix = np.unique(pix)
    npix = len(upix)
    rootLogger.info(str(npix) + ' HEALPix to query')

    # Loop over the HEALPix pixels
    ntotmatch = 0
    idstr_dtype = np.dtype([('measid', np.str_, 200), ('objectid', np.str_, 200),
                            ('pix', int)])
    idstr = np.zeros(nmeas, dtype=idstr_dtype)
    cnt = 0
    for i in range(npix):
        fitsfile = cmbdir + 'combine/' + str(int(upix[i]) // 1000) + '/' + str(
            upix[i]) + '.fits.gz'
        dbfile = cmbdir + 'combine/' + str(int(upix[i]) // 1000) + '/' + str(
            upix[i]) + '_idstr.db'
        if os.path.exists(dbfile):
            # Read meas id information from idstr database for this exposure
            #data = querydb(dbfile,table='idstr',cols='measid,objectid',where="exposure=='"+base+"'")
            idstr1 = readidstrdb(dbfile, where="exposure=='" + base + "'")
            nidstr1 = len(idstr1)
            if nidstr1 > 0:
                idstr['measid'][cnt:cnt + nidstr1] = idstr1['measid']
                idstr['objectid'][cnt:cnt + nidstr1] = idstr1['objectid']
                idstr['pix'][cnt:cnt + nidstr1] = upix[i]
                cnt += nidstr1
            rootLogger.info(
                str(i + 1) + ' ' + str(upix[i]) + ' ' + str(nidstr1))
            #nmatch = 0
            #if nidstr>0:
            #    idstr_measid = np.char.array(idstr['measid']).strip()
            #    idstr_objectid = np.char.array(idstr['objectid']).strip()
            #    #ind1,ind2 = dln.match(idstr_measid,measid)
            #    nmatch = len(ind1)
            #    if nmatch>0:
            #        meas['OBJECTID'][ind2] = idstr_objectid[ind1]
            #        ntotmatch += nmatch
            #rootLogger.info(str(i+1)+' '+str(upix[i])+' '+str(nmatch))

        else:
            rootLogger.info(
                str(i + 1) + ' ' + dbfile +
                ' NOT FOUND.  Checking for high-resolution database files.')
            # Check if there are high-resolution healpix idstr databases
            hidbfiles = glob(cmbdir + 'combine/' + str(int(upix[i]) // 1000) +
                             '/' + str(upix[i]) + '_n*_*_idstr.db')
            nhidbfiles = len(hidbfiles)
            if os.path.exists(fitsfile) & (nhidbfiles > 0):
                rootLogger.info('Found high-resolution HEALPix IDSTR files')
                for j in range(nhidbfiles):
                    dbfile1 = hidbfiles[j]
                    dbbase1 = os.path.basename(dbfile1)
                    idstr1 = readidstrdb(dbfile1,
                                         where="exposure=='" + base + "'")
                    nidstr1 = len(idstr1)
                    if nidstr1 > 0:
                        idstr['measid'][cnt:cnt + nidstr1] = idstr1['measid']
                        idstr['objectid'][cnt:cnt +
                                          nidstr1] = idstr1['objectid']
                        idstr['pix'][cnt:cnt + nidstr1] = upix[i]
                        cnt += nidstr1
                    rootLogger.info('  ' + str(j + 1) + ' ' + dbbase1 + ' ' +
                                    str(upix[i]) + ' ' + str(nidstr1))
                    #idstr_measid = np.char.array(idstr['measid']).strip()
                    #idstr_objectid = np.char.array(idstr['objectid']).strip()
                    #ind1,ind2 = dln.match(idstr_measid,measid)
                    #nmatch = len(ind1)
                    #if nmatch>0:
                    #    meas['OBJECTID'][ind2] = idstr_objectid[ind1]
                    #    ntotmatch += nmatch
                    #rootLogger.info('  '+str(j+1)+' '+dbbase1+' '+str(upix[i])+' '+str(nmatch))

    # Trim any leftover elements of IDSTR
    if cnt < nmeas:
        idstr = idstr[0:cnt]

    # Now match them all up
    rootLogger.info('Matching the measurements')
    idstr_measid = np.char.array(idstr['measid']).strip()
    idstr_objectid = np.char.array(idstr['objectid']).strip()
    ind1, ind2 = dln.match(idstr_measid, measid)
    nmatch = len(ind1)
    if nmatch > 0:
        meas['OBJECTID'][ind2] = idstr_objectid[ind1]

    # Only keep sources with an objectid
    ind, nind = dln.where(
        np.char.array(meas['OBJECTID']).strip().decode() == '')
    # There can be missing/orphaned measurements at healpix boundaries in crowded
    # regions when the DBSCAN eps is different.  But there should be very few of these.
    # At this point, let's allow this to pass
    if nind > 0:
        rootLogger.info('WARNING: ' + str(nind) +
                        ' measurements are missing OBJECTIDs')
    if ((nmeas >= 20000) & (nind > 20)) | ((nmeas < 20000) & (nind > 3)):
        rootLogger.info('More missing OBJECTIDs than currently allowed.')
        raise ValueError('More missing OBJECTIDs than currently allowed.')

    # Output the updated catalogs
    #rootLogger.info('Updating measurement catalogs')
    #for i in range(nchips):
    #    measfile1 = chstr['MEASFILE'][i].strip()
    #    lo = chstr['MEAS_INDEX'][i]
    #    hi = lo+chstr['NMEAS'][i]
    #    meas1 = meas[lo:hi]
    #    meta1 = Table.read(measfile1,2)        # load the meta extensions
    #    # 'KLUDGE!!!  Changing /dl1 filenames to /dl2 filenames')
    #    cols = ['EXPDIR','FILENAME','MEASFILE']
    #    for c in cols:
    #        f = np.char.array(meta1[c]).decode()
    #        f = np.char.array(f).replace('/dl1/users/dnidever/','/dl2/dnidever/')
    #        meta1[c] = f
    #    # Copy as a backup
    #    if os.path.exists(measfile1+'.bak'): os.remove(measfile1+'.bak')
    #    dum = shutil.move(measfile1,measfile1+'.bak')
    #    # Write new catalog
    #    #meas1.write(measfile1,overwrite=True)  # first, measurement table
    #    # append other fits binary tabl
    #    #hdulist = fits.open(measfile1)
    #    rootLogger.info('Writing '+measfile1)
    #    hdulist = fits.HDUList()
    #    hdulist.append(fits.table_to_hdu(meas1))       # first, meas catalog
    #    hdulist.append(fits.table_to_hdu(meta1))       # second, meta
    #    hdulist.writeto(measfile1,overwrite=True)
    #    hdulist.close()
    #    # Create a file saying that the file was successfully updated.
    #    dln.writelines(measfile1+'.updated','')
    #    # Delete backups
    #    if os.path.exists(measfile1+'.bak'): os.remove(measfile1+'.bak')

    measfile = expdir + '/' + base + '_meas.fits'
    meas.write(measfile, overwrite=True)
    if os.path.exists(measfile + '.gz'): os.remove(measfile + '.gz')
    ret = subprocess.call(['gzip', measfile])  # compress final catalog

    # Update the meta file as well, need to update the /dl2 filenames
    rootLogger.info('Updating meta file')
    meta.write(metafile, overwrite=True)
    hdulist = fits.open(metafile)
    hdu = fits.table_to_hdu(chstr)
    hdulist.append(hdu)
    hdulist.writeto(metafile, overwrite=True)
    hdulist.close()

    # Create a file saying that the files were updated okay.
    dln.writelines(expdir + '/' + base + '_meas.updated', '')

    rootLogger.info('dt = ' + str(time.time() - t0) + ' sec.')
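
A usage sketch: measurement_info() takes a combine-level HEALPix pixel number (nside=128) and writes an EXPOSURE_objectid_list.fits file for every exposure contributing to that pixel. The pixel number below is hypothetical, and note that the function still contains a pdb breakpoint with unfinished code after it:

measurement_info(181053)  # hypothetical nside=128 pixel number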
Code example #5
import os
import sqlite3
import time

import numpy as np
from astropy.io import fits
from dlnpyutils import utils as dln   # D. Nidever's utility module


def breakup_idstr(dbfile):
    """ Break up an idstr file into separate measid/objectid lists per exposure on /data0."""

    t00 = time.time()

    outdir = '/data0/dnidever/nsc/instcal/v3/idstr/'

    # Load the exposures table
    expcat = fits.getdata('/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure_table.fits.gz',1)

    # Make sure it's a list
    if isinstance(dbfile, str): dbfile = [dbfile]

    print('Breaking up '+str(len(dbfile))+' database files')

    # Loop over files
    for i,dbfile1 in enumerate(dbfile):
        print(str(i+1)+' '+dbfile1)
        if os.path.exists(dbfile1):
            t0 = time.time()
            dbbase1 = os.path.basename(dbfile1)[0:-9]  # remove _idstr.db ending
            # Get existing index names for this database
            d = sqlite3.connect(dbfile1, detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)
            cur = d.cursor()
            cmd = 'select measid,exposure,objectid from idstr'
            t1 = time.time()
            data = cur.execute(cmd).fetchall()
            print('  '+str(len(data))+' rows read in %5.1f sec. ' % (time.time()-t1))
            # Break up data into lists
            measid,exposure,objectid = list(zip(*data))
            measid = np.array(measid)
            objectid = np.array(objectid)
            exposure = np.array(exposure)
            eindex = dln.create_index(exposure)
            # Match exposures to exposure catalog
            ind1,ind2 = dln.match(expcat['EXPOSURE'],eindex['value'])
            # Loop over exposures and write output files
            nexp = len(eindex['value'])
            print('  '+str(nexp)+' exposures')
            measid_maxlen = np.max(dln.strlen(measid))
            objectid_maxlen = np.max(dln.strlen(objectid))
            df = np.dtype([('measid',np.str_,measid_maxlen+1),('objectid',np.str_,objectid_maxlen+1)])
            # Loop over the exposures and write out the files
            for k in range(nexp):
                if nexp>100:
                    if k % 100 == 0: print('  '+str(k+1))
                ind = eindex['index'][eindex['lo'][k]:eindex['hi'][k]+1]
                cat = np.zeros(len(ind),dtype=df)
                cat['measid'] = measid[ind]
                cat['objectid'] = objectid[ind]
                instcode = expcat['INSTRUMENT'][ind1[k]]
                dateobs = expcat['DATEOBS'][ind1[k]]
                night = dateobs[0:4]+dateobs[5:7]+dateobs[8:10]
                # exist_ok=True avoids a crash when another process creates
                # the directory at the same time
                expdir1 = outdir+instcode+'/'+night+'/'+eindex['value'][k]
                os.makedirs(expdir1, exist_ok=True)
                outfile = expdir1+'/'+eindex['value'][k]+'__'+dbbase1+'.npy'
                np.save(outfile,cat)
            print('  dt = %6.1f sec. ' % (time.time()-t0))
        else:
            print('  '+dbfile1+' NOT FOUND')

    print('dt = %6.1f sec.' % (time.time()-t00))
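
breakup_idstr() is the producer side of exposure_update() in code example #3: it splits one PIX_idstr.db SQLite database into per-exposure measid/objectid .npy files under /data0. A usage sketch with a hypothetical pixel database path:

breakup_idstr('/net/dl2/dnidever/nsc/instcal/v3/combine/181/181053_idstr.db')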
Code example #6
import os
import shutil
import subprocess

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from astropy.io import fits
from astropy.table import Table
from astropy.wcs import WCS
from reproject import reproject_interp
from dl import queryClient as qc       # NOIRLab Data Lab query client
from dlnpyutils import utils as dln    # D. Nidever's utility module
# getfitsext() and cutout() are assumed to be project-local helpers that read
# a FITS extension by EXTNAME and extract a subimage, respectively.


def meascutout(meas, obj, size=10, outdir='./', domask=True):
    """ Input the measurements and create cutouts. """

    expstr = fits.getdata(
        '/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposures.fits.gz', 1)
    #expstr = fits.getdata('/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure.fits.gz',1)
    decam = Table.read('/home/dnidever/projects/delvered/data/decam.txt',
                       format='ascii')

    objid = obj['id'][0]

    # Sort by MJD
    si = np.argsort(meas['mjd'])
    meas = meas[si]

    # Make cut on FWHM
    # maybe only use values for 0.5*fwhm_chip to 1.5*fwhm_chip
    sql = "select chip.* from nsc_dr2.chip as chip join nsc_dr2.meas as meas on chip.exposure=meas.exposure and chip.ccdnum=meas.ccdnum"
    sql += " where meas.objectid='" + objid + "'"
    chip = qc.query(sql=sql, fmt='table')
    ind3, ind4 = dln.match(chip['exposure'], meas['exposure'])
    si = np.argsort(ind4)  # sort by input meas catalog
    ind3 = ind3[si]
    ind4 = ind4[si]
    chip = chip[ind3]
    meas = meas[ind4]
    gdfwhm, = np.where((meas['fwhm'] > 0.2 * chip['fwhm'])
                       & (meas['fwhm'] < 2.0 * chip['fwhm']))
    if len(gdfwhm) == 0:
        print('All measurements have bad FWHM values')
        return
    if len(gdfwhm) < len(meas):
        print('Removing ' + str(len(meas) - len(gdfwhm)) +
              ' measurements with bad FWHM values')
        meas = meas[gdfwhm]

    ind1, ind2 = dln.match(expstr['base'], meas['exposure'])
    nind = len(ind1)
    if nind == 0:
        print('No matches')
        return
    # Sort by input meas catalog
    si = np.argsort(ind2)
    ind1 = ind1[si]
    ind2 = ind2[si]

    # Create the reference WCS
    wref = WCS(naxis=2)
    pixscale = 0.26  # DECam, "/pix
    npix = round(size / pixscale)
    if npix % 2 == 0:  # must be odd
        npix += 1
    hpix = npix // 2  # center of image
    wref.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    wref.wcs.crval = [obj['ra'][0], obj['dec'][0]]
    wref.wcs.crpix = [npix // 2, npix // 2]
    wref.wcs.cd = np.array([[pixscale / 3600.0, 0.0], [0.0, pixscale / 3600]])
    wref.array_shape = (npix, npix)
    refheader = wref.to_header()
    refheader['NAXIS'] = 2
    refheader['NAXIS1'] = npix
    refheader['NAXIS2'] = npix

    # Load the data
    instrument = expstr['instrument'][ind1]
    plver = expstr['plver'][ind1]
    fluxfile = expstr['file'][ind1]
    fluxfile = fluxfile.replace('/net/mss1/', '/mss1/')  # for thing/hulk
    maskfile = expstr['maskfile'][ind1]
    maskfile = maskfile.replace('/net/mss1/', '/mss1/')  # for thing/hulk
    ccdnum = meas['ccdnum'][ind2]
    figfiles = []
    xmeas = []
    ymeas = []
    cutimarr = np.zeros((npix, npix, nind), float)
    for i in range(nind):
        #for i in range(3):
        instcode = instrument[i]
        plver1 = plver[i]
        try:
            if instrument[i] == 'c4d':
                dind, = np.where(decam['CCDNUM'] == ccdnum[i])
                extname = decam['NAME'][dind[0]]
                im, head = getfitsext(fluxfile[i], extname, header=True)
                mim, mhead = getfitsext(maskfile[i], extname, header=True)
                #im,head = fits.getdata(fluxfile[i],header=True,extname=extname)
                #mim,mhead = fits.getdata(maskfile[i],header=True,extname=extname)
            else:
                im, head = fits.getdata(fluxfile[i], ccdnum[i], header=True)
                mim, mhead = fits.getdata(maskfile[i], ccdnum[i], header=True)
        except Exception:
            print('Error reading ' + fluxfile[i])
            import pdb
            pdb.set_trace()

        # Turn the mask from integer to bitmask
        if ((instcode == 'c4d') &
            (plver1 >= 'V3.5.0')) | (instcode == 'k4m') | (instcode == 'ksb'):
            omim = mim.copy()
            mim *= 0
            nonzero = (omim > 0)
            mim[nonzero] = 2**((omim - 1)[nonzero])  # This takes about 1 sec
        # Fix the DECam Pre-V3.5.0 masks
        if (instcode == 'c4d') & (plver1 < 'V3.5.0'):
            omim = mim.copy()
            mim *= 0  # re-initialize
            mim += (np.bitwise_and(omim, 1) == 1) * 1  # bad pixels
            mim += (np.bitwise_and(omim, 2) == 2) * 4  # saturated
            mim += (np.bitwise_and(omim, 4) == 4) * 32  # interpolated
            mim += (np.bitwise_and(omim, 16) == 16) * 16  # cosmic ray
            mim += (np.bitwise_and(omim, 64) == 64) * 8  # bleed trail

        # Get chip-level information
        exposure = os.path.basename(fluxfile[i])[0:-8]  # remove fits.fz
        chres = qc.query(sql="select * from nsc_dr2.chip where exposure='" +
                         exposure + "' and ccdnum=" + str(ccdnum[i]),
                         fmt='table')

        w = WCS(head)
        # RA/DEC correction for the object
        lon = obj['ra'][0] - chres['ra'][0]
        lat = obj['dec'][0] - chres['dec'][0]
        racorr = (chres['ra_coef1'][0] + chres['ra_coef2'][0] * lon +
                  chres['ra_coef3'][0] * lon * lat + chres['ra_coef4'][0] * lat)
        deccorr = (chres['dec_coef1'][0] + chres['dec_coef2'][0] * lon +
                   chres['dec_coef3'][0] * lon * lat + chres['dec_coef4'][0] * lat)
        # apply these offsets to the header WCS CRVAL
        #w.wcs.crval += [racorr,deccorr]
        #head['CRVAL1'] += racorr
        #head['CRVAL2'] += deccorr
        print(racorr, deccorr)

        # Object X/Y position
        xobj, yobj = w.all_world2pix(obj['ra'], obj['dec'], 0)
        # Get the cutout
        xcen = meas['x'][ind2[i]] - 1  # convert to 0-indexes
        ycen = meas['y'][ind2[i]] - 1
        smim = dln.gsmooth(im, 2)
        # use the object coords for centering
        #cutim,xr,yr = cutout(smim,xobj,yobj,size)

        # Mask the bad pixels
        if domask:
            badmask = (mim > 0)
            im[badmask] = np.nanmedian(im[~badmask])
        else:
            badmask = (im < 0)

        # Create a common TAN WCS that each image gets interpoled onto!!!
        #hdu1 = fits.open(fluxfile[i],extname=extname)
        smim1 = dln.gsmooth(im, 1.5)
        hdu = fits.PrimaryHDU(smim1, head)
        cutim, footprint = reproject_interp(hdu, refheader, order='bicubic')
        # set out-of-bounds pixels to the background level
        cutim[footprint == 0] = np.nanmedian(im[~badmask])
        #xr = [0,npix-1]
        #yr = [0,npix-1]
        xr = [-hpix * pixscale, hpix * pixscale]
        yr = [-hpix * pixscale, hpix * pixscale]

        # exposure_ccdnum, filter, MJD, delta_MJD, mag
        print(
            str(i + 1) + ' ' + meas['exposure'][ind2[i]] + ' ' +
            str(ccdnum[i]) + ' ' + str(meas['x'][ind2[i]]) + ' ' +
            str(meas['y'][ind2[i]]) + ' ' + str(meas['mag_auto'][ind2[i]]))

        #figdir = '/net/dl2/dnidever/nsc/instcal/v3/hpm2/cutouts/'
        figfile = outdir
        figfile += '%s_%04d_%s_%02d.jpg' % (str(
            obj['id'][0]), i + 1, meas['exposure'][ind2[i]], ccdnum[i])
        figfiles.append(figfile)
        matplotlib.use('Agg')
        plt.rc('font', size=15)
        plt.rc('axes', titlesize=20)
        plt.rc('axes', labelsize=20)
        plt.rc('xtick', labelsize=20)
        plt.rc('ytick', labelsize=20)
        #plt.rcParams.update({'font.size': 15})
        #plt.rcParams.update({'axes.size': 20})
        #plt.rcParams.update({'xtick.size': 20})
        #plt.rcParams.update({'ytick.size': 20})
        if os.path.exists(figfile): os.remove(figfile)
        fig = plt.gcf()  # get current graphics window
        fig.clf()  # clear

        gskw = dict(width_ratios=[30, 1])
        fig, ax = plt.subplots(ncols=2, nrows=1, gridspec_kw=gskw)

        figsize = 8.0  #6.0
        figheight = 8.0
        figwidth = 9.0
        #ax = fig.subplots()  # projection=wcs
        #fig.set_figheight(figsize*0.8)
        fig.set_figheight(figheight)
        fig.set_figwidth(figwidth)
        med = np.nanmedian(smim)
        sig = dln.mad(smim)
        bigim, xr2, yr2 = cutout(smim, xcen, ycen, 151, missing=med)
        lmed = np.nanmedian(bigim)

        # Get the flux of the object and scale each image to the same height
        #meas.mag_aper1 = cat1.mag_aper[0] + 2.5*alog10(exptime) + chstr[i].zpterm
        #cmag = mag_auto + 2.5*alog10(exptime) + zpterm
        instmag = meas['mag_auto'][ind2[i]] - 2.5 * np.log10(
            chres['exptime'][0]) - chres['zpterm'][0]
        #mag = -2.5*log(flux)+25.0
        instflux = 10**((25.0 - instmag) / 2.5)
        print('flux = ' + str(instflux))
        # Get height of object
        #  flux of 2D Gaussian is ~2*pi*height*sigma^2
        pixscale1 = np.max(np.abs(w.wcs.cd)) * 3600
        fwhm = chres['fwhm'][0] / pixscale1
        instheight = instflux / (2 * np.pi * (fwhm / 2.35)**2)
        print('height = ' + str(instheight))
        # Scale the images to the flux level of the first image
        cutim -= lmed
        if i == 0:
            instflux0 = instflux.copy()
            instheight0 = instheight.copy()
        else:
            scale = instflux0 / instflux
            #scale = instheight0/instheight
            cutim *= scale
            print('scaling image by ' + str(scale))

        #vmin = lmed-8*sig  # 3*sig
        #vmax = lmed+12*sig  # 5*sig
        if i == 0:
            vmin = -8 * sig  # 3*sig
            #vmax = 12*sig  # 5*sig
            vmax = 0.5 * instheight  # 0.5
            vmin0 = vmin
            vmax0 = vmax
        else:
            vmin = vmin0
            vmax = vmax0

        print('vmin = ' + str(vmin))
        print('vmax = ' + str(vmax))

        cutimarr[:, :, i] = cutim.copy()

        ax[0].imshow(cutim,
                     origin='lower',
                     aspect='auto',
                     interpolation='none',
                     extent=(xr[0], xr[1], yr[0], yr[1]),
                     vmin=vmin,
                     vmax=vmax,
                     cmap='viridis')  # viridis, Greys, jet
        #plt.imshow(cutim,origin='lower',aspect='auto',interpolation='none',
        #           vmin=vmin,vmax=vmax,cmap='viridis')   # viridis, Greys, jet
        #plt.colorbar()

        # show one vertical, one horizontal line pointing to the center but offset
        # then a small dot on the meas position
        # 13, 8
        ax[0].plot(np.array([0, 0]),
                   np.array([-0.066 * npix, 0.066 * npix]) * pixscale,
                   c='white',
                   alpha=0.7)
        ax[0].plot(np.array([-0.066 * npix, 0.066 * npix]) * pixscale,
                   np.array([0, 0]),
                   c='white',
                   alpha=0.7)

        # Meas X/Y position
        xmeas1, ymeas1 = wref.all_world2pix(meas['ra'][ind2[i]],
                                            meas['dec'][ind2[i]], 0)
        xmeas.append(xmeas1)
        ymeas.append(ymeas1)
        ax[0].scatter([(xmeas1 - hpix) * pixscale],
                      [(ymeas1 - hpix) * pixscale],
                      c='r',
                      marker='+',
                      s=20)
        #plt.scatter([xmeas],[ymeas],c='r',marker='+',s=100)
        #plt.scatter([xcen],[ycen],c='r',marker='+',s=100)
        # Object X/Y position
        #xobj,yobj = w.all_world2pix(obj['ra'],obj['dec'],0)
        xobj, yobj = wref.all_world2pix(obj['ra'], obj['dec'], 0)
        #plt.scatter(xobj,yobj,marker='o',s=200,facecolors='none',edgecolors='y',linewidth=3)
        #plt.scatter(xobj,yobj,c='y',marker='+',s=100)
        #leg = ax.legend(loc='upper left', frameon=False)
        ax[0].set_xlabel(r'$\Delta$ RA (arcsec)')
        ax[0].set_ylabel(r'$\Delta$ DEC (arcsec)')
        ax[0].set_xlim((xr[1], xr[0]))  # sky right
        ax[0].set_ylim(yr)
        #plt.xlabel('X')
        #plt.ylabel('Y')
        #plt.xlim(xr)
        #plt.ylim(yr)
        #ax.annotate(r'S/N=%5.1f',xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
        co = 'white'  #'lightgray' # blue
        ax[0].annotate('%s  %02d  %s  %6.1f  ' %
                       (meas['exposure'][ind2[i]], ccdnum[i],
                        meas['filter'][ind2[i]], expstr['exptime'][ind1[i]]),
                       xy=(np.mean(xr), yr[0] + dln.valrange(yr) * 0.05),
                       ha='center',
                       color=co)
        ax[0].annotate(
            r'%10.2f    $\Delta$t=%7.2f  ' %
            (meas['mjd'][ind2[i]], meas['mjd'][ind2[i]] - np.min(meas['mjd'])),
            xy=(xr[1] - dln.valrange(xr) * 0.05,
                yr[1] - dln.valrange(yr) * 0.05),
            ha='left',
            color=co)
        #               xy=(xr[0]+dln.valrange(xr)*0.05, yr[1]-dln.valrange(yr)*0.05),ha='left',color=co)
        ax[0].annotate('%s = %5.2f +/- %4.2f' %
                       (meas['filter'][ind2[i]], meas['mag_auto'][ind2[i]],
                        meas['magerr_auto'][ind2[i]]),
                       xy=(xr[0] + dln.valrange(xr) * 0.05,
                           yr[1] - dln.valrange(yr) * 0.05),
                       ha='right',
                       color=co)
        #               xy=(xr[1]-dln.valrange(xr)*0.05, yr[1]-dln.valrange(yr)*0.05),ha='right',color=co)

        # Progress bar
        frameratio = (i + 1) / float(nind)
        timeratio = (meas['mjd'][ind2[i]] -
                     np.min(meas['mjd'])) / dln.valrange(meas['mjd'])
        #ratio = frameratio
        ratio = timeratio
        print('ratio = ' + str(100 * ratio))
        barim = np.zeros((100, 100), int)
        ind = dln.limit(int(round(ratio * 100)), 1, 99)
        barim[:, 0:ind] = 1
        ax[1].imshow(barim.T, origin='lower', aspect='auto', cmap='Greys')
        ax[1].set_xlabel('%7.1f \n days' %
                         (meas['mjd'][ind2[i]] - np.min(meas['mjd'])))
        #ax[1].set_xlabel('%d/%d' % (i+1,nind))
        ax[1].set_title('%d/%d' % (i + 1, nind))
        ax[1].axes.xaxis.set_ticks([])
        #ax[1].axes.xaxis.set_visible(False)
        ax[1].axes.yaxis.set_visible(False)
        #ax[1].axis('off')
        right_side = ax[1].spines['right']
        right_side.set_visible(False)
        left_side = ax[1].spines['left']
        left_side.set_visible(False)
        top_side = ax[1].spines['top']
        top_side.set_visible(False)

        plt.savefig(figfile)
        print('Cutout written to ' + figfile)

        #import pdb; pdb.set_trace()

    avgim = np.sum(cutimarr, axis=2) / nind
    avgim *= instheight0 / np.max(avgim)
    medim = np.median(cutimarr, axis=2)

    # Make a single blank file at the end so you know it looped
    figfile = outdir
    figfile += '%s_%04d_%s.jpg' % (str(obj['id'][0]), i + 2, 'path')
    figfiles.append(figfile)
    matplotlib.use('Agg')
    if os.path.exists(figfile): os.remove(figfile)
    fig = plt.gcf()  # get current graphics window
    fig.clf()  # clear
    gskw = dict(width_ratios=[30, 1])
    fig, ax = plt.subplots(ncols=2, nrows=1, gridspec_kw=gskw)
    fig.set_figheight(figheight)
    fig.set_figwidth(figwidth)
    ax[0].imshow(avgim,
                 origin='lower',
                 aspect='auto',
                 interpolation='none',
                 extent=(xr[0], xr[1], yr[0], yr[1]),
                 vmin=vmin,
                 vmax=vmax,
                 cmap='viridis')  # viridis, Greys, jet
    ax[0].plot(np.array([0, 0]),
               np.array([-0.066 * npix, 0.066 * npix]) * pixscale,
               c='white',
               alpha=0.7,
               zorder=1)
    ax[0].plot(np.array([-0.066 * npix, 0.066 * npix]) * pixscale,
               np.array([0, 0]),
               c='white',
               alpha=0.7,
               zorder=1)
    xmeas = np.array(xmeas)
    ymeas = np.array(ymeas)
    ax[0].plot((xmeas - hpix) * pixscale, (ymeas - hpix) * pixscale, c='r')
    #plt.scatter((xmeas-hpix)*pixscale,(ymeas-hpix)*pixscale,c='r',marker='+',s=30)
    ax[0].set_xlabel(r'$\Delta$ RA (arcsec)')
    ax[0].set_ylabel(r'$\Delta$ DEC (arcsec)')
    ax[0].set_xlim((xr[1], xr[0]))  # sky -right
    ax[0].set_ylim(yr)
    ax[1].axis('off')
    plt.savefig(figfile)
    # Make nine more copies of the final "path" frame
    for j in np.arange(2, 11):
        #pathfile = figfile.replace('path1','path'+str(j))
        pathfile = figfile.replace('%04d' % (i + 2), '%04d' % (i + 1 + j))
        if os.path.exists(pathfile): os.remove(pathfile)
        shutil.copyfile(figfile, pathfile)
        figfiles.append(pathfile)

    # Make the animated gif
    animfile = outdir + str(objid) + '_cutouts.gif'
    if os.path.exists(animfile): os.remove(animfile)
    # put list of files in a separate file
    listfile = outdir + str(objid) + '_cutouts.lst'
    if os.path.exists(listfile): os.remove(listfile)
    dln.writelines(listfile, figfiles)
    delay = dln.scale(nind, [20, 1000], [20, 1])
    delay = int(np.round(dln.limit(delay, 1, 20)))
    print('delay = ' + str(delay))
    print('Creating animated gif ' + animfile)
    #ret = subprocess.run('convert -delay 100 '+figdir+str(objid)+'_*.jpg '+animfile,shell=True)
    #ret = subprocess.run('convert -delay 20 '+' '.join(figfiles)+' '+animfile,shell=True)
    # in ImageMagick, -delay must precede the input frames to take effect
    ret = subprocess.run('convert -delay ' + str(delay) + ' @' + listfile +
                         ' ' + animfile,
                         shell=True)
    #import pdb; pdb.set_trace()
    dln.remove(figfiles)
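
A usage sketch, assuming Data Lab access and that the project helper functions are importable; the object ID is hypothetical. meascutout() expects the object and its measurements as tables:

objid = '185128_2439'  # hypothetical NSC DR2 object id
obj = qc.query(sql="select * from nsc_dr2.object where id='" + objid + "'",
               fmt='table')
meas = qc.query(sql="select * from nsc_dr2.meas where objectid='" + objid + "'",
                fmt='table')
meascutout(meas, obj, size=10, outdir='/tmp/cutouts/')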
Code example #7
import os
import re

import numpy as np
from astropy.table import Table
from dlnpyutils import utils as dln   # D. Nidever's utility module


def modelmag(tab, instfilt, dec, eqnfile):
    """
    This calculates the model magnitudes for the NSC catalog
    given a catalog with the appropriate information

    Parameters
    ----------
    tab : table
       Catalog of sources with appropriate magnitude
         columns.
    instfilt : str
       Short instrument and filter name, e.g. 'c4d-g'.
    dec : float
       The declination of the exposure.
    eqnfile : str
       File with the model magnitude equations.

    Returns
    -------
    model_mag : numpy array
       An [Nsource,3] array with model magnitudes, errors and color.

    Example
    -------

    model_mag = modelmag(cat,'c4d-g',-50.0,'modelmag_equations.txt')

    By D. Nidever  Feb 2019
    Translated to Python by D. Nidever, April 2022
    """

    # This calculates the model magnitude for stars given the
    # the magnitudes in reference catalogs
    # NUV - Galex NUV magnitude
    # GMAG - Gaia G magnitude
    # JMAG - 2MASS J magnitude
    # KMAG - 2MASS Ks magnitude
    # APASS_GMAG - APASS g magnitude
    # APASS_RMAG - APASS r magnitude
    # EBV  - E(B-V) reddening

    ntab = len(tab)
    colnames = tab.colnames
    for n in tab.colnames:
        tab[n].name = n.upper()
    tabcols = np.char.array(tab.colnames)

    ## Load the model magnitude equation information
    ## band, dec range, color equation, color min/max range, quality cuts, model mag equation
    if not os.path.exists(eqnfile):
        raise ValueError(eqnfile + ' NOT FOUND')
    eqnstr = Table.read(eqnfile, format='ascii')
    for c in eqnstr.colnames:
        eqnstr[c].name = c.lower()
    neqn = len(eqnstr)
    ## Get COLOR and DEC ranges
    eqnstr['colorlim'] = np.zeros((len(eqnstr), 2), float)
    eqnstr['declim'] = np.zeros((len(eqnstr), 2), float)
    for i in range(len(eqnstr)):
        # Color range
        cr = np.char.array(eqnstr['colorange'][i].split(','))
        cr = cr.replace('[', '').replace(']', '')
        eqnstr['colorlim'][i] = np.array(cr).astype(float)
        # DEC range
        dr = np.char.array(eqnstr['decrange'][i].split(','))
        dr = dr.replace('[', '').replace(']', '')
        eqnstr['declim'][i] = np.array(dr).astype(float)

    ## Get the band for this INSTRUMENT-FILTER and DEC.
    gd, = np.where((np.char.array(eqnstr['instrument']) + '-' +
                    np.char.array(eqnstr['band']) == instfilt)
                   & (dec >= eqnstr['declim'][:, 0])
                   & (dec <= eqnstr['declim'][:, 1]))
    if len(gd) == 0:
        raise ValueError('No model magnitude equation for INSTRUMENT-FILTER=' +
                         instfilt + ' and DEC=%.2f' % dec)
    if len(gd) > 1:
        print('Found multiple magnitude equations for INSTRUMENT-FILTER=' +
              instfilt + ' and DEC=%.2f. Using the first one' % dec)
        gd = gd[:1]  # keep as a length-1 array so the row extraction below works
    eqnstr1 = eqnstr[gd]
    eqnstr1 = dict(zip(eqnstr1.colnames, eqnstr1[0]))  # convert to dictionary

    ## No parentheses allowed
    if eqnstr1['coloreqn'].find('(') != -1 or eqnstr1['coloreqn'].find(')') != -1 or \
       eqnstr1['modelmageqn'].find('(') != -1 or eqnstr1['modelmageqn'].find(')') != -1:
        raise ValueError(
            'No parentheses allowed in the model magnitude equations')

    ## Are we using color?
    if (eqnstr1['colorlim'][0] < -10 and eqnstr1['colorlim'][1] > 10 and
            eqnstr1['modelmageqn'].upper().find('COLOR') == -1):
        usecolor = False
    else:
        usecolor = True

    ## Get all columns that we need
    coloreqn = eqnstr1['coloreqn']
    qualitycuts = eqnstr1['qualitycuts']
    modelmageqn = eqnstr1['modelmageqn']
    coloreqn_cols = re.split('[-+*^]', coloreqn)
    modelmageqn_cols = re.split('[-+*^]', modelmageqn)

    if usecolor:
        cols = np.char.array(coloreqn_cols + modelmageqn_cols).upper()
    else:
        cols = np.char.array(modelmageqn_cols).upper()

    ## Remove numbers and "COLOR"
    isnumeric = np.array([dln.isnumber(c) for c in cols])
    bd, = np.where(isnumeric | (cols.upper() == 'COLOR') | (cols == '??'))
    if len(bd) > 0:
        if len(bd) < len(cols):
            cols = np.delete(cols, bd)
        else:
            cols = cols[:0]  # nothing usable left; keep an empty array so len() works
    ncols = len(cols)
    ## No columns left
    if ncols == 0:
        raise ValueError('No columns to use.')
    ## Only unique columns
    cols = np.unique(cols)
    ncols = len(cols)
    ind1, ind2 = dln.match(tabcols, cols)
    ntagmatch = len(ind1)
    if ntagmatch < ncols:
        leftind = np.arange(ncols)
        if ntagmatch > 0:
            leftind = np.delete(leftind, ind2)
        print('Needed columns missing. ' + ' '.join(cols[leftind]))

    ## Make the color
    ##  replace the columns by TAB[GD].COLUMN
    if usecolor:
        coloreqn_cols = np.char.array(re.split('[-+*^]', coloreqn)).upper()
        coloreqn_cols = np.unique(coloreqn_cols)  # unique ones
        bd, = np.where(np.array([dln.isnumber(c)
                                 for c in coloreqn_cols]))  ## Remove numbers
        if len(bd) > 0:
            coloreqn_cols = np.delete(coloreqn_cols, bd)
        colcmd = coloreqn.upper()
        for i in range(len(coloreqn_cols)):
            colcmd = colcmd.replace(coloreqn_cols[i],
                                    "tab['" + coloreqn_cols[i] + "']")
        color = eval(colcmd)
        color = np.array(color)
    else:
        color = np.zeros(ntab, float)

    ## Make quality cuts
    magcolsind, = np.where(((dln.find(cols, 'MAG$') > -1) &
                            (dln.find(cols, '^E_') == -1)) | (cols == 'NUV'))
    ## make sure all magnitudes are good (0 < mag < 50) and finite
    goodmask = np.ones(ntab, bool)
    for i in range(len(magcolsind)):
        magind, = np.where(tabcols.upper() == cols[magcolsind[i]].upper())
        if len(magind) == 0:
            print(cols[magcolsind[i]].upper() + ' column NOT found')
            return []
        goodmask &= ((tab[tabcols[magind[0]]] < 50) &
                     (tab[tabcols[magind[0]]] > 0)
                     & np.isfinite(tab[tabcols[magind[0]]]))

    ## input quality cuts
    ##  replace <=, <, >, >=, =, &, |
    qualitycuts = qualitycuts.replace('<=', ' <= ')
    qualitycuts = qualitycuts.replace('>=', ' >= ')
    qualitycuts = qualitycuts.replace('>', ' > ')
    qualitycuts = qualitycuts.replace('<', ' < ')
    qualitycuts = qualitycuts.replace('=', ' == ')
    qualitycuts = qualitycuts.replace('&', ' & ')
    qualitycuts = qualitycuts.replace('|', ' | ')
    ## fix column names
    qualitycuts_cols = qualitycuts.split()
    for i in range(len(qualitycuts_cols)):
        col = qualitycuts_cols[i]
        colind, = np.where(tabcols == col.upper())
        if len(colind) > 0:
            qualitycuts = qualitycuts.replace(
                tabcols[colind[0]], "tab['" + tabcols[colind[0]] + "']")
    goodmask &= eval(qualitycuts)

    ## Apply the color range
    if usecolor:
        goodmask &= ((color >= eqnstr1['colorlim'][0]) &
                     (color <= eqnstr1['colorlim'][1]))
    ## Get the sources that pass all cuts
    gd, = np.where(goodmask == True)
    if len(gd) == 0:
        print('No good sources left')
        return []

    # Make the model magnitude
    ##  replace the columns by TAB[GD].COLUMN
    modelmageqn_cols = np.char.array(re.split('[-+*^]', modelmageqn)).upper()
    bd, = np.where(
        np.array([dln.isnumber(c) for c in modelmageqn_cols]) |
        (modelmageqn_cols.upper() == 'COLOR'))  ## Remove numbers and "COLOR"
    if len(bd) > 0:
        modelmageqn_cols = np.delete(modelmageqn_cols, bd)
    modelmageqn_cols = np.unique(modelmageqn_cols)  # unique ones
    magcmd = modelmageqn.upper()
    for i in range(len(modelmageqn_cols)):
        magcmd = magcmd.replace(modelmageqn_cols[i],
                                "tab['" + modelmageqn_cols[i] + "'][gd]")
    magcmd = magcmd.replace('COLOR', 'color[gd]')  # the color array computed above
    modelmag_gd = eval(magcmd)
    modelmag = np.zeros(ntab, float) + 99.99
    modelmag[gd] = modelmag_gd

    ## Make the error structure
    ##  Each magnitude has an E_MAG error except for PS and Gaia GMAG
    ##  If we are using PS or GMAG then add error columns for them
    adderrcols = []
    psmagind, = np.where((dln.find(cols, '^PS_') > -1)
                         & (dln.find(cols, 'MAG$') > -1))
    if len(psmagind) > 0:
        adderrcols += list('E_' + cols[psmagind])
    nadderrcols = len(adderrcols)
    ## Making error structure
    errcolind, = np.where(dln.find(tabcols, '^E_') > -1)
    errcols = tabcols[errcolind]
    errdt = []
    for i in range(len(errcols)):
        errdt += [(errcols[i], float)]
    if nadderrcols > 0:
        for i in range(nadderrcols):
            errdt += [(adderrcols[i], float)]
    err = np.zeros(ntab, dtype=np.dtype(errdt))
    err = Table(err)
    for c in err.colnames:
        err[c] = 0.001
    for n in err.colnames:
        if n in tab.colnames:
            err[n] = tab[n]
    ## leave the PS errors at 0.001
    ## convert NAN or 99.99 to 9.99 to be consistent
    for c in err.colnames:
        bd, = np.where((err[c] > 10.0) | ~np.isfinite(err[c]))
        if len(bd) > 0:
            err[c][bd] = 9.99

    ## Calculate the color errors
    ## get the columns
    if usecolor:
        colorerr_cols = np.char.array(re.split('[-+*^]', coloreqn)).upper()
        colorerr_cols = np.unique(colorerr_cols)  # unique ones
        bd, = np.where(
            np.array([dln.isnumber(c) for c in colorerr_cols]) |
            (colorerr_cols.upper() == 'EBV'))  ## Remove numbers and "EBV"
        if len(bd) > 0:
            colorerr_cols = np.delete(colorerr_cols, bd)
        ## use - and + signs to break apart the components that need to be squared
        coloreqn_terms = np.char.array(re.split('[-+]', coloreqn)).upper()
        ## remove any terms that don't have a COLORERR_COLS in them
        okay = np.zeros(len(coloreqn_terms), bool)
        for i in range(len(coloreqn_terms)):
            for j in range(len(colorerr_cols)):
                okay[i] |= (coloreqn_terms[i].find(colorerr_cols[j]) > -1)
        bd, = np.where(okay == False)
        if len(bd) > 0:
            coloreqn_terms = np.delete(coloreqn_terms, bd)
        ## Now create the equation, add in quadrature
        colorerrcmd = 'np.sqrt( ' + '+'.join('(' + coloreqn_terms.upper() +
                                             ')**2') + ' )'
        #colorerrcmd = colorerrcmd.upper()
        for i in range(len(colorerr_cols)):
            colorerrcmd = colorerrcmd.replace(
                colorerr_cols[i], "err['E_" + colorerr_cols[i] + "'][gd]")
        colorerr_gd = eval(colorerrcmd)
        colorerr = np.zeros(ntab, float) + 9.99
        colorerr[gd] = colorerr_gd
    else:
        colorerr = np.zeros(ntab, float)

    ## The modelmag errors
    ## get the columns
    modelmagerr_cols = np.char.array(re.split('[-+*^]', modelmageqn)).upper()
    modelmagerr_cols = np.unique(modelmagerr_cols)  # unique ones
    bd, = np.where(
        np.array([dln.isnumber(c) for c in modelmagerr_cols]) |
        (modelmagerr_cols.upper() == 'EBV'))  ## Remove numbers and "EBV"
    if len(bd) > 0:
        modelmagerr_cols = np.delete(modelmagerr_cols, bd)
    ## use - and + signs to break apart the components that need to be squared
    modelmageqn_terms = np.char.array(re.split('[-+]', modelmageqn)).upper()
    ## remove any terms that don't have a MODELMAGERR_COLS column in them
    okay = np.zeros(len(modelmageqn_terms), bool)
    for i in range(len(modelmageqn_terms)):
        for j in range(len(modelmagerr_cols)):
            okay[i] |= (modelmageqn_terms[i].find(modelmagerr_cols[j]) > -1)
    bd, = np.where(okay == False)
    if len(bd) > 0:
        modelmageqn_terms = np.delete(modelmageqn_terms, bd)
    ## Now create the equation, add in quadrature
    modelmagerrcmd = 'np.sqrt( ' + '+'.join('(' + modelmageqn_terms +
                                            ')**2') + ' )'
    for i in range(len(modelmageqn_cols)):
        modelmagerrcmd = modelmagerrcmd.replace(
            modelmageqn_cols[i],
            "err['E_" + modelmageqn_cols[i].upper() + "'][gd]")
    modelmagerrcmd = modelmagerrcmd.replace('COLOR', 'colorerr[gd]')
    modelmagerr_gd = eval(modelmagerrcmd)
    modelmagerr = np.zeros(ntab, float) + 99.90
    modelmagerr[gd] = modelmagerr_gd

    ## Combine model mag, error and color into one array
    mags = np.zeros((len(tab), 3), float)
    mags[:, 0] = modelmag
    mags[:, 1] = modelmagerr
    mags[:, 2] = color

    # Change back to original column names
    for n in colnames:
        tab[n.upper()].name = n

    return mags
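
A minimal usage sketch (not from the original source): it writes a one-line equations file whose whitespace-delimited columns (instrument, band, decrange, colorange, coloreqn, qualitycuts, modelmageqn) match what the parsing code above expects, builds a small synthetic catalog, and calls modelmag. The equation and all values are made up for illustration.

from astropy.table import Table

def demo_modelmag():
    # Hypothetical one-line equations file in the format parsed above
    eqnfile = 'modelmag_equations_demo.txt'
    with open(eqnfile, 'w') as f:
        f.write('instrument band decrange colorange coloreqn qualitycuts modelmageqn\n')
        f.write('c4d g [-90,90] [0.3,1.1] JMAG-KMAG EBV<0.3 GMAG+0.5*COLOR+0.1*EBV\n')
    # Synthetic catalog with the magnitude and error columns the equation needs
    cat = Table({'gmag': [16.2, 17.5], 'jmag': [15.1, 16.3], 'kmag': [14.6, 15.6],
                 'ebv': [0.05, 0.10], 'e_gmag': [0.01, 0.02],
                 'e_jmag': [0.02, 0.03], 'e_kmag': [0.02, 0.03]})
    mags = modelmag(cat, 'c4d-g', -50.0, eqnfile)  # [N,3]: model mag, error, color
    print(mags)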
Code Example #8
def measurement_update(expdir):

    t0 = time.time()
    hostname = socket.gethostname()
    host = hostname.split('.')[0]

    # Get version number from exposure directory
    lo = expdir.find('nsc/instcal/')
    dum = expdir[lo + 12:]
    version = dum[0:dum.find('/')]
    cmbdir = '/net/dl2/dnidever/nsc/instcal/' + version + '/'
    edir = '/net/dl1/users/dnidever/nsc/instcal/' + version + '/'
    nside = 128

    # Check if output file already exists
    base = os.path.basename(expdir)

    # Log file
    #------------------
    # format is BASE_measure_update.DATETIME.log
    # timestamp string, e.g. 072219003020 for 2019-07-22 00:30:20
    logtime = time.strftime('%m%d%y%H%M%S', time.localtime())
    logfile = expdir + '/' + base + '_measure_update.' + logtime + '.log'
    if os.path.exists(logfile): os.remove(logfile)

    # Set up logging to screen and logfile
    logFormatter = logging.Formatter(
        "%(asctime)s [%(levelname)-5.5s]  %(message)s")
    rootLogger = logging.getLogger()
    fileHandler = logging.FileHandler(logfile)
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    rootLogger.setLevel(logging.NOTSET)

    rootLogger.info(
        'Adding objectID for measurement catalogs for exposure = ' + base)
    rootLogger.info("expdir = " + expdir)
    rootLogger.info("host = " + host)
    rootLogger.info(" ")

    #  Load the exposure and metadata files
    metafile = expdir + '/' + base + '_meta.fits'
    meta = Table.read(metafile, 1)
    nmeta = len(meta)
    chstr = Table.read(metafile, 2)
    rootLogger.info('KLUDGE!!!  Changing /dl1 filenames to /dl2 filenames')
    cols = ['EXPDIR', 'FILENAME', 'MEASFILE']
    for c in cols:
        f = np.char.array(chstr[c]).decode()
        f = np.char.array(f).replace('/dl1/users/dnidever/', '/dl2/dnidever/')
        chstr[c] = f
    nchips = len(chstr)

    measdtype = np.dtype([('MEASID', 'S50'), ('OBJECTID', 'S50'),
                          ('EXPOSURE', 'S50'), ('CCDNUM', '>i2'),
                          ('FILTER', 'S2'), ('MJD', '>f8'), ('X', '>f4'),
                          ('Y', '>f4'), ('RA', '>f8'), ('RAERR', '>f4'),
                          ('DEC', '>f8'), ('DECERR', '>f4'),
                          ('MAG_AUTO', '>f4'), ('MAGERR_AUTO', '>f4'),
                          ('MAG_APER1', '>f4'), ('MAGERR_APER1', '>f4'),
                          ('MAG_APER2', '>f4'), ('MAGERR_APER2', '>f4'),
                          ('MAG_APER4', '>f4'), ('MAGERR_APER4', '>f4'),
                          ('MAG_APER8', '>f4'), ('MAGERR_APER8', '>f4'),
                          ('KRON_RADIUS', '>f4'), ('ASEMI', '>f4'),
                          ('ASEMIERR', '>f4'), ('BSEMI', '>f4'),
                          ('BSEMIERR', '>f4'), ('THETA', '>f4'),
                          ('THETAERR', '>f4'), ('FWHM', '>f4'),
                          ('FLAGS', '>i2'), ('CLASS_STAR', '>f4')])

    # Load and concatenate the meas catalogs
    chstr['MEAS_INDEX'] = 0  # keep track of where each chip catalog starts
    count = 0
    meas = Table(data=np.zeros(int(np.sum(chstr['NMEAS'])), dtype=measdtype))
    rootLogger.info('Loading and concatenating the chip measurement catalogs')
    for i in range(nchips):
        meas1 = Table.read(chstr['MEASFILE'][i].strip(),
                           1)  # load chip meas catalog
        nmeas1 = len(meas1)
        meas[count:count + nmeas1] = meas1
        chstr['MEAS_INDEX'][i] = count
        count += nmeas1
    measid = np.char.array(meas['MEASID']).strip().decode()
    nmeas = len(meas)
    rootLogger.info(str(nmeas) + ' measurements')

    # Get the OBJECTID from the combined healpix file IDSTR structure
    #  remove any sources that weren't used

    # Figure out which healpix pixels this exposure overlaps
    pix = hp.ang2pix(nside, meas['RA'], meas['DEC'], lonlat=True)
    upix = np.unique(pix)
    npix = len(upix)
    rootLogger.info(str(npix) + ' HEALPix to query')

    # Loop over the HEALPix pixels
    ntotmatch = 0
    idstr_dtype = np.dtype([('measid', 'U200'), ('objectid', 'U200'),
                            ('pix', int)])
    idstr = np.zeros(nmeas, dtype=idstr_dtype)
    cnt = 0
    for i in range(npix):
        fitsfile = cmbdir + 'combine/' + str(int(upix[i]) // 1000) + '/' + str(
            upix[i]) + '.fits.gz'
        dbfile = cmbdir + 'combine/' + str(int(upix[i]) // 1000) + '/' + str(
            upix[i]) + '_idstr.db'
        if os.path.exists(dbfile):
            # Read meas id information from idstr database for this exposure
            idstr1 = readidstrdb(dbfile, where="exposure=='" + base + "'")
            nidstr1 = len(idstr1)
            if nidstr1 > 0:
                idstr['measid'][cnt:cnt + nidstr1] = idstr1['measid']
                idstr['objectid'][cnt:cnt + nidstr1] = idstr1['objectid']
                idstr['pix'][cnt:cnt + nidstr1] = upix[i]
                cnt += nidstr1
            rootLogger.info(
                str(i + 1) + ' ' + str(upix[i]) + ' ' + str(nidstr1))

        else:
            rootLogger.info(
                str(i + 1) + ' ' + dbfile +
                ' NOT FOUND.  Checking for high-resolution database files.')
            # Check if there are high-resolution healpix idstr databases
            hidbfiles = glob(cmbdir + 'combine/' + str(int(upix[i]) // 1000) +
                             '/' + str(upix[i]) + '_n*_*_idstr.db')
            nhidbfiles = len(hidbfiles)
            if os.path.exists(fitsfile) and (nhidbfiles > 0):
                rootLogger.info('Found high-resolution HEALPix IDSTR files')
                for j in range(nhidbfiles):
                    dbfile1 = hidbfiles[j]
                    dbbase1 = os.path.basename(dbfile1)
                    idstr1 = readidstrdb(dbfile1,
                                         where="exposure=='" + base + "'")
                    nidstr1 = len(idstr1)
                    if nidstr1 > 0:
                        idstr['measid'][cnt:cnt + nidstr1] = idstr1['measid']
                        idstr['objectid'][cnt:cnt +
                                          nidstr1] = idstr1['objectid']
                        idstr['pix'][cnt:cnt + nidstr1] = upix[i]
                        cnt += nidstr1
                    rootLogger.info('  ' + str(j + 1) + ' ' + dbbase1 + ' ' +
                                    str(upix[i]) + ' ' + str(nidstr1))

    # Trim any leftover elements of IDSTR
    if cnt < nmeas:
        idstr = idstr[0:cnt]

    # Now match them all up
    rootLogger.info('Matching the measurements')
    idstr_measid = np.char.array(idstr['measid']).strip()
    idstr_objectid = np.char.array(idstr['objectid']).strip()
    ind1, ind2 = dln.match(idstr_measid, measid)
    nmatch = len(ind1)
    if nmatch > 0:
        meas['OBJECTID'][ind2] = idstr_objectid[ind1]

    # Check for sources that are still missing an objectid
    ind, nind = dln.where(
        np.char.array(meas['OBJECTID']).strip().decode() == '')
    # There can be missing/orphaned measurements at healpix boundaries in crowded
    # regions when the DBSCAN eps is different.  But there should be very few of these.
    # At this point, let's allow this to pass
    if nind > 0:
        rootLogger.info('WARNING: ' + str(nind) +
                        ' measurements are missing OBJECTIDs')
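    # Tolerate only a handful of orphans: up to 20 for large exposures
    # (>=20,000 measurements) and up to 3 for small ones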
    if ((nmeas >= 20000) & (nind > 20)) | ((nmeas < 20000) & (nind > 3)):
        rootLogger.info('More missing OBJECTIDs than currently allowed.')
        raise ValueError('More missing OBJECTIDs than currently allowed.')

    # Output the updated catalogs
    #rootLogger.info('Updating measurement catalogs')
    #for i in range(nchips):
    #    measfile1 = chstr['MEASFILE'][i].strip()
    #    lo = chstr['MEAS_INDEX'][i]
    #    hi = lo+chstr['NMEAS'][i]
    #    meas1 = meas[lo:hi]
    #    meta1 = Table.read(measfile1,2)        # load the meta extensions
    #    # 'KLUDGE!!!  Changing /dl1 filenames to /dl2 filenames')
    #    cols = ['EXPDIR','FILENAME','MEASFILE']
    #    for c in cols:
    #        f = np.char.array(meta1[c]).decode()
    #        f = np.char.array(f).replace('/dl1/users/dnidever/','/dl2/dnidever/')
    #        meta1[c] = f
    #    # Copy as a backup
    #    if os.path.exists(measfile1+'.bak'): os.remove(measfile1+'.bak')
    #    dum = shutil.move(measfile1,measfile1+'.bak')
    #    # Write new catalog
    #    #meas1.write(measfile1,overwrite=True)  # first, measurement table
    #    # append other fits binary tabl
    #    #hdulist = fits.open(measfile1)
    #    rootLogger.info('Writing '+measfile1)
    #    hdulist = fits.HDUList()
    #    hdulist.append(fits.table_to_hdu(meas1))       # first, meas catalog
    #    hdulist.append(fits.table_to_hdu(meta1))       # second, meta
    #    hdulist.writeto(measfile1,overwrite=True)
    #    hdulist.close()
    #    # Create a file saying that the file was successfully updated.
    #    dln.writelines(measfile1+'.updated','')
    #    # Delete backups
    #    if os.path.exists(measfile1+'.bak'): os.remove(measfile1+'.bak')

    # Output the updated measurement catalog
    #  Writing a single FITS file is much faster than many small ones
    measfile = expdir + '/' + base + '_meas.fits'
    meas.write(measfile, overwrite=True)
    if os.path.exists(measfile + '.gz'): os.remove(measfile + '.gz')
    ret = subprocess.call(['gzip', measfile])  # compress final catalog

    # Update the meta file as well with the new /dl2 filenames
    rootLogger.info('Updating meta file')
    meta.write(metafile, overwrite=True)
    hdulist = fits.open(metafile)
    hdu = fits.table_to_hdu(chstr)
    hdulist.append(hdu)
    hdulist.writeto(metafile, overwrite=True)
    hdulist.close()

    # Create a file saying that the files were updated okay.
    dln.writelines(expdir + '/' + base + '_meas.updated', '')

    rootLogger.info('dt = ' + str(time.time() - t0) + ' sec.')
Code Example #9
def meascutout(meas, obj, size=10, outdir='./'):
    """ Input the measurements and create cutouts. """

    expstr = fits.getdata(
        '/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposures.fits.gz', 1)
    #expstr = fits.getdata('/net/dl2/dnidever/nsc/instcal/v3/lists/nsc_v3_exposure.fits.gz',1)
    decam = Table.read('/home/dnidever/projects/delvered/data/decam.txt',
                       format='ascii')

    objid = obj['id'][0]

    # Sort by MJD
    si = np.argsort(meas['mjd'])
    meas = meas[si]

    ind1, ind2 = dln.match(expstr['base'], meas['exposure'])
    nind = len(ind1)
    if nind == 0:
        print('No matches')
        return
    # Sort by input meas catalog
    si = np.argsort(ind2)
    ind1 = ind1[si]
    ind2 = ind2[si]

    # Create the reference WCS
    wref = WCS(naxis=2)
    pixscale = 0.26  # DECam, "/pix
    npix = round(size / pixscale)
    if npix % 2 == 0:  # must be odd
        npix += 1
    hpix = npix // 2  # center of image
    wref.wcs.ctype = ['RA---TAN', 'DEC--TAN']
    wref.wcs.crval = [obj['ra'][0], obj['dec'][0]]
    wref.wcs.crpix = [npix // 2, npix // 2]
    wref.wcs.cd = np.array([[pixscale / 3600.0, 0.0], [0.0, pixscale / 3600]])
    wref.array_shape = (npix, npix)
    refheader = wref.to_header()
    refheader['NAXIS'] = 2
    refheader['NAXIS1'] = npix
    refheader['NAXIS2'] = npix

    # Load the data
    instrument = expstr['instrument'][ind1]
    fluxfile = expstr['file'][ind1]
    fluxfile = fluxfile.replace('/net/mss1/', '/mss1/')  # for thing/hulk
    maskfile = expstr['maskfile'][ind1]
    maskfile = maskfile.replace('/net/mss1/', '/mss1/')  # for thing/hulk
    ccdnum = meas['ccdnum'][ind2]
    figfiles = []
    for i in range(nind):
        try:
            if instrument[i] == 'c4d':
                dind, = np.where(decam['CCDNUM'] == ccdnum[i])
                extname = decam['NAME'][dind[0]]
                im, head = getfitsext(fluxfile[i], extname, header=True)
                mim, mhead = getfitsext(maskfile[i], extname, header=True)
                #im,head = fits.getdata(fluxfile[i],header=True,extname=extname)
                #mim,mhead = fits.getdata(maskfile[i],header=True,extname=extname)
            else:
                im, head = fits.getdata(fluxfile[i], ccdnum[i], header=True)
                mim, mhead = fits.getdata(maskfile[i], ccdnum[i], header=True)
        except Exception as e:
            print('Error reading exposure ' + fluxfile[i] + ': ' + str(e))
            import pdb
            pdb.set_trace()

        # Get chip-level information
        exposure = os.path.basename(fluxfile[i])[0:-8]  # remove fits.fz
        chres = qc.query(sql="select * from nsc_dr2.chip where exposure='" +
                         exposure + "' and ccdnum=" + str(ccdnum[i]),
                         fmt='table')

        w = WCS(head)
        # RA/DEC correction for the object
        lon = obj['ra'][0] - chres['ra'][0]
        lat = obj['dec'][0] - chres['dec'][0]
        racorr = chres['ra_coef1'][0] + chres['ra_coef2'][0] * lon + chres[
            'ra_coef3'][0] * lon * lat + chres['ra_coef4'][0] * lat
        deccorr = chres['dec_coef1'][0] + chres['dec_coef2'][0] * lon + chres[
            'dec_coef3'][0] * lon * lat + chres['dec_coef4'][0] * lat
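        # First-order astrometric correction: constant, linear and cross terms
        # in the (RA,DEC) offset from the chip reference position, using the
        # coefficients stored in the nsc_dr2.chip table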
        # apply these offsets to the header WCS CRVAL
        w.wcs.crval += [racorr, deccorr]
        head['CRVAL1'] += racorr
        head['CRVAL2'] += deccorr

        # Object X/Y position
        xobj, yobj = w.all_world2pix(obj['ra'], obj['dec'], 0)
        # Get the cutout
        xcen = meas['x'][ind2[i]] - 1  # convert to 0-indexes
        ycen = meas['y'][ind2[i]] - 1
        smim = dln.gsmooth(im, 2)
        # use the object coords for centering
        #cutim,xr,yr = cutout(smim,xobj,yobj,size)

        # Mask the bad pixels
        badmask = (mim > 0)
        im[badmask] = np.nanmedian(im[~badmask])

        # Create a common TAN WCS that each image gets interpolated onto
        #hdu1 = fits.open(fluxfile[i],extname=extname)
        smim1 = dln.gsmooth(im, 1.5)
        hdu = fits.PrimaryHDU(smim1, head)
        cutim, footprint = reproject_interp(hdu, refheader, order='bicubic')
        cutim[footprint == 0] = np.nanmedian(
            im[~badmask])  # set out-of-bounds to background
        #xr = [0,npix-1]
        #yr = [0,npix-1]
        xr = [-hpix * pixscale, hpix * pixscale]
        yr = [-hpix * pixscale, hpix * pixscale]

        # exposure_ccdnum, filter, MJD, delta_MJD, mag
        print(
            str(i + 1) + ' ' + meas['exposure'][ind2[i]] + ' ' +
            str(ccdnum[i]) + ' ' + str(meas['x'][ind2[i]]) + ' ' +
            str(meas['y'][ind2[i]]) + ' ' + str(meas['mag_auto'][ind2[i]]))

        #figdir = '/net/dl2/dnidever/nsc/instcal/v3/hpm2/cutouts/'
        figfile = outdir
        figfile += '%s_%04d_%s_%02d.jpg' % (str(
            obj['id'][0]), i + 1, meas['exposure'][ind2[i]], ccdnum[i])
        figfiles.append(figfile)
        matplotlib.use('Agg')
        plt.rcParams.update({'font.size': 11})
        if os.path.exists(figfile): os.remove(figfile)
        fig = plt.gcf()  # get current graphics window
        fig.clf()  # clear

        figsize = 8.0  #6.0
        ax = fig.subplots()  # projection=wcs
        #fig.set_figheight(figsize*0.8)
        fig.set_figheight(figsize)
        fig.set_figwidth(figsize)
        med = np.nanmedian(smim)
        sig = dln.mad(smim)
        bigim, xr2, yr2 = cutout(smim, xcen, ycen, 151, missing=med)
        lmed = np.nanmedian(bigim)

        # Get the flux of the object and scale each image to the same height
        #meas.mag_aper1 = cat1.mag_aper[0] + 2.5*alog10(exptime) + chstr[i].zpterm
        #cmag = mag_auto + 2.5*alog10(exptime) + zpterm
        instmag = meas['mag_auto'][ind2[i]] - 2.5 * np.log10(
            chres['exptime'][0]) - chres['zpterm'][0]
        #mag = -2.5*log(flux)+25.0
        instflux = 10**((25.0 - instmag) / 2.5)
        print('flux = ' + str(instflux))
        # Get height of object
        #  flux of 2D Gaussian is ~2*pi*height*sigma^2
        pixscale1 = np.max(np.abs(w.wcs.cd)) * 3600
        fwhm = chres['fwhm'][0] / pixscale1
        instheight = instflux / (2 * np.pi * (fwhm / 2.35)**2)
        print('height = ' + str(instheight))
        # Scale the images to the flux level of the first image
        cutim -= lmed
        if i == 0:
            instflux0 = instflux.copy()
            instheight0 = instheight.copy()
        else:
            scale = instflux0 / instflux
            #scale = instheight0/instheight
            cutim *= scale
            print('scaling image by ' + str(scale))

        #vmin = lmed-8*sig  # 3*sig
        #vmax = lmed+12*sig  # 5*sig
        if i == 0:
            vmin = -8 * sig  # 3*sig
            #vmax = 12*sig  # 5*sig
            vmax = 0.5 * instheight  # 0.5
            vmin0 = vmin
            vmax0 = vmax
        else:
            vmin = vmin0
            vmax = vmax0

        print('vmin = ' + str(vmin))
        print('vmax = ' + str(vmax))

        plt.imshow(cutim,
                   origin='lower',
                   aspect='auto',
                   interpolation='none',
                   extent=(xr[0], xr[1], yr[0], yr[1]),
                   vmin=vmin,
                   vmax=vmax,
                   cmap='viridis')  # viridis, Greys, jet
        #plt.imshow(cutim,origin='lower',aspect='auto',interpolation='none',
        #           vmin=vmin,vmax=vmax,cmap='viridis')   # viridis, Greys, jet
        #plt.colorbar()

        # show one vertical, one horizontal line pointing to the center but offset
        # then a small dot on the meas position
        # 13, 8
        plt.plot(np.array([0, 0]),
                 np.array([-0.066 * npix, 0.066 * npix]) * pixscale,
                 c='white',
                 alpha=0.7)
        plt.plot(np.array([-0.066 * npix, 0.066 * npix]) * pixscale,
                 np.array([0, 0]),
                 c='white',
                 alpha=0.7)

        # Meas X/Y position
        xmeas, ymeas = wref.all_world2pix(meas['ra'][ind2[i]],
                                          meas['dec'][ind2[i]], 0)
        plt.scatter([(xmeas - hpix) * pixscale], [(ymeas - hpix) * pixscale],
                    c='r',
                    marker='+',
                    s=20)
        #plt.scatter([xmeas],[ymeas],c='r',marker='+',s=100)
        #plt.scatter([xcen],[ycen],c='r',marker='+',s=100)
        # Object X/Y position
        #xobj,yobj = w.all_world2pix(obj['ra'],obj['dec'],0)
        xobj, yobj = wref.all_world2pix(obj['ra'], obj['dec'], 0)
        #plt.scatter(xobj,yobj,marker='o',s=200,facecolors='none',edgecolors='y',linewidth=3)
        #plt.scatter(xobj,yobj,c='y',marker='+',s=100)
        #leg = ax.legend(loc='upper left', frameon=False)
        plt.xlabel(r'$\Delta$ RA (arcsec)')
        plt.ylabel(r'$\Delta$ DEC (arcsec)')
        #plt.xlabel('X')
        #plt.ylabel('Y')
        #plt.xlim(xr)
        #plt.ylim(yr)
        #ax.annotate(r'S/N=%5.1f',xy=(np.mean(xr), yr[0]+dln.valrange(yr)*0.05),ha='center')
        co = 'white'  #'lightgray' # blue
        ax.annotate('%s  %02d  %s  %6.1f  ' %
                    (meas['exposure'][ind2[i]], ccdnum[i],
                     meas['filter'][ind2[i]], expstr['exptime'][ind1[i]]),
                    xy=(np.mean(xr), yr[0] + dln.valrange(yr) * 0.05),
                    ha='center',
                    color=co)
        ax.annotate(
            '%10.2f  %10.2f  ' %
            (meas['mjd'][ind2[i]], meas['mjd'][ind2[i]] - np.min(meas['mjd'])),
            xy=(xr[0] + dln.valrange(xr) * 0.05,
                yr[1] - dln.valrange(yr) * 0.05),
            ha='left',
            color=co)
        ax.annotate('%s = %5.2f +/- %4.2f' %
                    (meas['filter'][ind2[i]], meas['mag_auto'][ind2[i]],
                     meas['magerr_auto'][ind2[i]]),
                    xy=(xr[1] - dln.valrange(xr) * 0.05,
                        yr[1] - dln.valrange(yr) * 0.05),
                    ha='right',
                    color=co)
        plt.savefig(figfile)
        print('Cutout written to ' + figfile)

        #import pdb; pdb.set_trace()

    # Make a single blank file at the end so you know it looped
    figfile = outdir
    figfile += '%s_%04d_%s.jpg' % (str(obj['id'][0]), i + 2, 'blank')
    figfiles.append(figfile)
    matplotlib.use('Agg')
    if os.path.exists(figfile): os.remove(figfile)
    fig = plt.gcf()  # get current graphics window
    fig.clf()  # clear
    figsize = 8.0  #6.0
    fig.set_figheight(figsize)
    fig.set_figwidth(figsize)
    plt.savefig(figfile)
    print(figfile)

    # Make the animated gif
    animfile = outdir + str(objid) + '_cutouts.gif'
    print('Creating animated gif ' + animfile)
    if os.path.exists(animfile): os.remove(animfile)
    #ret = subprocess.run('convert -delay 100 '+figdir+str(objid)+'_*.jpg '+animfile,shell=True)
    ret = subprocess.run('convert -delay 20 ' + ' '.join(figfiles) + ' ' +
                         animfile,
                         shell=True)
    #import pdb; pdb.set_trace()
    dln.remove(figfiles)
Code Example #10
    if len(totrim) > 0:
        # Trim objects
        idstr = np.delete(idstr, totrim)
        #idstr = utils.remove_indices(idstr,totrim)
        # Update IDSTR.objectindex
        old_idstr_objectindex = idstr['objectindex']
        idstr['objectindex'] = newobjindex[old_idstr_objectindex]

    # Create final summary structure from ALLMETA
    #  get exposures that are in IDSTR
    #  sometimes EXPNUM numbers have the leading 0s removed
    #  and sometimes not, so turn to LONG to match
    dum, uiexpnum = np.unique(idstr['expnum'].astype(int), return_index=True)
    uexpnum = idstr[uiexpnum]['expnum'].astype(int)
    nuexpnum = len(uexpnum)
    ind1, ind2 = utils.match(allmeta['expnum'].astype(int), uexpnum)
    nmatch = len(ind1)
    sumstr = Table(allmeta[ind1])
    col_nobj = Column(name='nobjects', dtype=int, length=len(sumstr))
    col_healpix = Column(name='healpix', dtype=int, length=len(sumstr))
    sumstr.add_columns([col_nobj, col_healpix])
    sumstr['nobjects'] = 0
    sumstr['healpix'] = pix
    # get number of objects per exposure
    expnum = idstr['expnum'].astype(int)
    siexp = np.argsort(expnum)
    expnum = expnum[siexp]
    if nuexpnum > 1:
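        # a break occurs wherever the sorted expnum differs from the previous
        # element; np.roll shifts the array by one so each element is compared
        # with its predecessor (the first element always registers as a break)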
        brklo, = np.where(expnum != np.roll(expnum, 1))
        nbrk = len(brklo)
        brkhi = np.hstack((brklo[1:nbrk], len(expnum)))
Code Example #11
def fix_pms(objectid):
    """ Correct the proper motions in the healpix object catalog."""

    t00 = time.time()
    hostname = socket.gethostname()
    host = hostname.split('.')[0]

    version = 'v3'
    radeg = np.float64(180.00) / np.pi

    meas = qc.query(sql="select * from nsc_dr2.meas where objectid='"+objectid+"'",fmt='table')
    nmeas = len(meas)
    print('  '+str(nmeas))
    mnra = np.median(meas['ra'].data)
    mndec = np.median(meas['dec'].data)

    lim = 20.0  # 50.0
    # convert the RA offset to a true angle with cos(dec), as done further below
    gd, = np.where( (np.abs(meas['ra'].data-mnra)*np.cos(np.deg2rad(mndec))*3600 < lim) &
                     (np.abs(meas['dec'].data-mndec)*3600 < lim))
    ngd = len(gd)
    nbd = nmeas-ngd
    print('bad measurements '+str(nbd))
    #if nbd==0:
    #    return None
    meas = meas[gd]

    # Make cut on FWHM
    # maybe only use values for 0.5*fwhm_chip to 1.5*fwhm_chip
    sql = "select chip.* from nsc_dr2.chip as chip join nsc_dr2.meas as meas on chip.exposure=meas.exposure and chip.ccdnum=meas.ccdnum"
    sql += " where meas.objectid='"+objectid+"'"
    chip = qc.query(sql=sql,fmt='table')
    ind3,ind4 = dln.match(chip['exposure'],meas['exposure'])
    si = np.argsort(ind4)   # sort by input meas catalog
    ind3 = ind3[si]
    ind4 = ind4[si]
    chip = chip[ind3]
    meas = meas[ind4]
    gdfwhm, = np.where((meas['fwhm'] > 0.2*chip['fwhm']) & (meas['fwhm'] < 2.0*chip['fwhm']))
    if len(gdfwhm)==0:
        print('All measurements have bad FWHM values')
        return
    if len(gdfwhm) < len(meas):
        print('Removing '+str(len(meas)-len(gdfwhm))+' measurements with bad FWHM values')
        meas = meas[gdfwhm]


    raerr = np.array(meas['raerr']*1e3,np.float64)    # milli arcsec
    ra = np.array(meas['ra'],np.float64)
    ra -= np.mean(ra)
    ra *= 3600*1e3 * np.cos(mndec/radeg)     # convert to true angle, milli arcsec
    t = np.array(meas['mjd'].copy())
    t -= np.mean(t)
    t /= 365.2425                          # convert to year
    # Calculate robust slope
    try:
        #pmra, pmraerr = dln.robust_slope(t,ra,raerr,reweight=True)

        # LADfit
        pmra_ladcoef, absdev = dln.ladfit(t,ra)
        pmra_lad = pmra_ladcoef[1]

        # Run RANSAC
        ransac = linear_model.RANSACRegressor()
        ransac.fit(t.reshape(-1,1), ra)
        inlier_mask = ransac.inlier_mask_
        outlier_mask = np.logical_not(inlier_mask)
        gdmask = inlier_mask
        pmra_ransac = ransac.estimator_.coef_[0]
        print('  ransac '+str(np.sum(inlier_mask))+' inliers   '+str(np.sum(outlier_mask))+' outliers')

        # Robust, weighted linear fit with INLIERS
        #pmra_coef, pmra_coeferr = dln.poly_fit(t[gdmask],ra[gdmask],1,sigma=raerr[gdmask],robust=True,error=True)
        #pmra_coef, pmra_coeferr = dln.poly_fit(t,ra,1,sigma=raerr,robust=True,error=True)
        #pmra = pmra_coef[0]
        #pmraerr = pmra_coeferr[0]
        #radiff = ra-dln.poly(t,pmra_coef)
        radiff = ra-t*pmra_lad
        radiff -= np.median(radiff)
        rasig = dln.mad(radiff)
        # Reject outliers
        gdsig = (np.abs(radiff) < 2.5*rasig) | (np.abs(radiff) < 2.5*raerr)
        print('  '+str(len(t)-np.sum(gdsig))+' 2.5*sigma clip outliers rejected')
        #if np.sum(gdsig) < nmeas:
        pmra_coef, pmra_coeferr = dln.poly_fit(t[gdsig],ra[gdsig],1,sigma=raerr[gdsig],robust=True,error=True)
        pmra = pmra_coef[0]
        pmraerr = pmra_coeferr[0]
        rasig = dln.mad(ra-dln.poly(t,pmra_coef))
    except:
        print('problem')
        #import pdb; pdb.set_trace()
        return np.append(np.zeros(10,float)+np.nan, np.zeros(2,int))

    decerr = np.array(meas['decerr']*1e3,np.float64)   # milli arcsec
    dec = np.array(meas['dec'],np.float64)
    dec -= np.mean(dec)
    dec *= 3600*1e3                         # convert to milli arcsec
    # Calculate robust slope
    try:
        #pmdec, pmdecerr = dln.robust_slope(t,dec,decerr,reweight=True)

        # LADfit
        pmdec_ladcoef, absdev = dln.ladfit(t,dec)
        pmdec_lad = pmdec_ladcoef[1]

        # Run RANSAC
        ransac = linear_model.RANSACRegressor()
        ransac.fit(t.reshape(-1,1), dec)
        inlier_mask = ransac.inlier_mask_
        outlier_mask = np.logical_not(inlier_mask)
        gdmask = inlier_mask
        pmdec_ransac = ransac.estimator_.coef_[0]
        print('  ransac '+str(np.sum(inlier_mask))+' inliers   '+str(np.sum(outlier_mask))+' outliers')

        # Robust, weighted linear fit with INLIERS
        #pmdec_coef, pmdec_coeferr = dln.poly_fit(t[gdmask],dec[gdmask],1,sigma=decerr[gdmask],robust=True,error=True)
        #pmdec_coef, pmdec_coeferr = dln.poly_fit(t,dec,1,sigma=decerr,robust=True,error=True)
        #pmdec = pmdec_coef[0]
        #pmdecerr = pmdec_coeferr[0]
        #decdiff = dec-dln.poly(t,pmdec_coef)
        decdiff = dec-t*pmdec_lad
        decdiff -= np.median(decdiff)
        decsig = dln.mad(decdiff)
        # Reject outliers
        gdsig = (np.abs(decdiff) < 2.5*decsig) | (np.abs(decdiff) < 2.5*decerr)
        print('  '+str(len(t)-np.sum(gdsig))+' 2.5*sigma clip outliers rejected')
        #if np.sum(gdsig) < nmeas:
        pmdec_coef, pmdec_coeferr = dln.poly_fit(t[gdsig],dec[gdsig],1,sigma=decerr[gdsig],robust=True,error=True)
        pmdec = pmdec_coef[0]
        pmdecerr = pmdec_coeferr[0]
        decsig = dln.mad(dec-dln.poly(t,pmdec_coef))            

    except:
        print('problem')
        #import pdb; pdb.set_trace()
        return np.append(np.zeros(10,float)+np.nan, np.zeros(2,int))

    deltamjd = np.max(meas['mjd'])-np.min(meas['mjd'])
    out = np.array([pmra,pmraerr,pmra_ransac,pmra_lad,rasig,pmdec,pmdecerr,pmdec_ransac,pmdec_lad,decsig,nmeas,deltamjd])

    #print(out[[0,2,3]])
    #print(out[[5,7,8]])

    #import pdb; pdb.set_trace()

    return out
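
For reference, a small sketch (not part of the original source) unpacking the 12-element vector that fix_pms returns into named quantities; the objectid below is hypothetical:

cols = ['pmra', 'pmraerr', 'pmra_ransac', 'pmra_lad', 'rasig',
        'pmdec', 'pmdecerr', 'pmdec_ransac', 'pmdec_lad', 'decsig',
        'nmeas', 'deltamjd']
out = fix_pms('150700_25440')   # hypothetical NSC object id
pm = dict(zip(cols, out))
print('pmra = %.2f +/- %.2f mas/yr' % (pm['pmra'], pm['pmraerr']))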
Code Example #12
File: getpsf.py Project: dnidever/psfphot
def getpsf(psf,image,cat,fitradius=None,lookup=False,lorder=0,method='qr',subnei=False,
           allcat=None,maxiter=10,minpercdiff=1.0,reject=False,maxrejiter=3,verbose=False):
    """
    Fit PSF model to stars in an image with outlier rejection of badly-fit stars.

    Parameters
    ----------
    psf : PSF object
       PSF object with initial parameters to use.
    image : CCDData object
       Image to use to fit PSF model to stars.
    cat : table
       Catalog with initial amp/x/y values for the stars to use to fit the PSF.
    fitradius : float, optional
       The fitting radius.  If none is input then the initial PSF FWHM will be used.
    lookup : boolean, optional
       Use an empirical lookup table.  Default is False.
    lorder : int, optional
       The order of the spatial variations (0=constant, 1=linear).  Default is 0.
    method : str, optional
       Method to use for solving the non-linear least squares problem: "qr",
       "svd", "cholesky", and "curve_fit".  Default is "qr".
    subnei : boolean, optional
       Subtract stars neighboring the PSF stars.  Default is False.
    allcat : table, optional
       Catalog of all objects in the image.  This is needed for bad PSF star
       rejection.
    maxiter : int, optional
       Maximum number of iterations to allow.  Only for methods "qr", "svd", and "cholesky".
       Default is 10.
    minpercdiff : float, optional
       Minimum percent change in the parameters to allow until the solution is
       considered converged and the iteration loop is stopped.  Only for methods
       "qr" and "svd".  Default is 1.0.
    reject : boolean, optional
       Reject PSF stars with high RMS values.  Default is False.
    maxrejiter : int, optional
       Maximum number of PSF star rejection iterations.  Default is 3.
    verbose : boolean, optional
       Verbose output.

    Returns
    -------
    newpsf : PSF object
       New PSF object with the best-fit model parameters.
    pars : numpy array
       Array of best-fit model parameters
    perror : numpy array
       Uncertainties in "pars".
    psfcat : table
       Table of best-fitting amp/xcen/ycen values for the PSF stars.

    Example
    -------

    newpsf,pars,perror,psfcat = getpsf(psf,image,cat)

    """

    t0 = time.time()
    print = utils.getprintfunc() # Get print function to be used locally, allows for easy logging   

    # Fitting radius
    if fitradius is None:
        if type(psf)==models.PSFPenny:
            fitradius = psf.fwhm()*1.5
        else:
            fitradius = psf.fwhm()
        
    # subnei but no allcat input
    if subnei and allcat is None:
        raise ValueError('allcat is needed for PSF neighbor star subtraction')
        
    if 'id' not in cat.colnames:
        cat['id'] = np.arange(len(cat))+1
    psfcat = cat.copy()

    # Initializing output PSF star catalog
    dt = np.dtype([('id',int),('amp',float),('x',float),('y',float),('npix',int),('rms',float),
                   ('chisq',float),('ixmin',int),('ixmax',int),('iymin',int),('iymax',int),('reject',int)])
    outcat = np.zeros(len(cat),dtype=dt)
    outcat = Table(outcat)
    for n in ['id','x','y']:
        outcat[n] = cat[n]
    
    # Remove stars that are too close to the edge
    ny,nx = image.shape
    bd = (psfcat['x']<fitradius) | (psfcat['x']>(nx-1-fitradius)) | \
         (psfcat['y']<fitradius) | (psfcat['y']>(ny-1-fitradius))
    nbd = np.sum(bd)
    if nbd > 0:
        if verbose:
            print('Removing '+str(nbd)+' stars near the edge')
        psfcat = psfcat[~bd]

    # Generate an empirical image of the stars
    # and fit a model to it to get initial estimates
    if type(psf)!=models.PSFEmpirical:
        cube = starcube(psfcat,image,npix=psf.npix,fillvalue=np.nan)
        epsf,nbadstar,rms = mkempirical(cube,order=0)
        epsfim = CCDData(epsf,error=epsf.copy()*0+1,mask=~np.isfinite(epsf))
        pars,perror,mparams = psf.fit(epsfim,pars=[1.0,psf.npix//2,psf.npix//2],allpars=True)
        initpar = mparams.copy()
        curpsf = psf.copy()
        curpsf.params = initpar
        if verbose:
            print('Initial estimate from empirical PSF fit = '+str(mparams))
    else:
        curpsf = psf.copy()
        initpar = psf.params.copy()
        
    # Outlier rejection iterations
    nrejiter = 0
    flag = 0
    nrejstar = 100
    fitrad = fitradius
    useimage = image.copy()
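    # Iterate: refit the PSF each pass, optionally rejecting high-RMS stars
    # and subtracting neighbors, until the parameters converge or the
    # stopping criteria at the bottom of the loop are met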
    while (flag==0):
        if verbose:
            print('--- Iteration '+str(nrejiter+1)+' ---')                

        # Update the fitting radius
        if nrejiter>0:
            fitrad = curpsf.fwhm()
        if verbose:
            print('  Fitting radius = %5.3f' % (fitrad))
                    
        # Reject outliers
        if reject and nrejiter>0:
            medrms = np.median(pcat['rms'])
            sigrms = dln.mad(pcat['rms'].data)
            gd, = np.where(pcat['rms'] < medrms+3*sigrms)
            nrejstar = len(psfcat)-len(gd)
            if verbose:
                print('  RMS = %6.4f +/- %6.4f' % (medrms,sigrms))
                print('  Threshold RMS = '+str(medrms+3*sigrms))
                print('  Rejecting '+str(nrejstar)+' stars')
            if nrejstar>0:
                psfcat = psfcat[gd]

        # Subtract neighbors
        if nrejiter>0 and subnei:
            if verbose:
                print('Subtracting neighbors')
            # Find the neighbors in allcat, fit them along with the PSF stars,
            # and subtract them from the image
            useimage = image.copy()  # start with original image
            useimage = subtractnei(useimage,allcat,cat,curpsf)
                
        # Fitting the PSF to the stars
        #-----------------------------
        newpsf,pars,perror,pcat,pf = fitpsf(curpsf,useimage,psfcat,fitradius=fitrad,method=method,
                                            maxiter=maxiter,minpercdiff=minpercdiff,verbose=verbose)

        # Add information into the output catalog
        ind1,ind2 = dln.match(outcat['id'],pcat['id'])
        outcat['reject'] = 1
        for n in pcat.columns:
            outcat[n][ind1] = pcat[n][ind2]
        outcat['reject'][ind1] = 0

        # Compare PSF parameters
        if type(newpsf)!=models.PSFEmpirical:
            pardiff = newpsf.params-curpsf.params
        else:
            pardiff = newpsf._data-curpsf._data
        sumpardiff = np.sum(np.abs(pardiff))
        curpsf = newpsf.copy()
        
        # Stopping criteria
        if reject is False or sumpardiff<0.05 or nrejiter>=maxrejiter or nrejstar==0: flag=1
        if subnei is True and nrejiter==0: flag=0   # iterate at least once with neighbor subtraction
        
        nrejiter += 1
        
    # Generate an empirical look-up table of corrections
    if lookup:
        if verbose:
            print('Making empirical lookup table with order='+str(lorder))

        pf.mklookup(lorder)
        # Fit the stars again and get new RMS values
        xdata = np.arange(pf.ntotpix)
        out = pf.model(xdata,*pf.psf.params)
        newpsf = pf.psf.copy()
        # Update information in the output catalog
        ind1,ind2 = dln.match(outcat['id'],pcat['id'])
        outcat['reject'] = 1
        outcat['reject'][ind1] = 0
        outcat['amp'][ind1] = pf.staramp[ind2]
        outcat['x'][ind1] = pf.starxcen[ind2]
        outcat['y'][ind1] = pf.starycen[ind2]
        outcat['rms'][ind1] = pf.starrms[ind2]
        outcat['chisq'][ind1] = pf.starchisq[ind2]                
        if verbose:
            print('Median RMS: '+str(np.median(pf.starrms)))            
            
    if verbose:
        print('dt = %.2f sec' % (time.time()-t0))
    
    return newpsf, pars, perror, outcat
Code Example #13
 # Restore the calibration summary file
 temp = fits.getdata(basedir+'lists/nsc_instcal_calibrate.fits',1)
 schema = dict(temp.dtype.fields)
 schema['chipindx'] = (int,0)
 schema['ngoodchipwcs'] = (int,0)
 schema['wcscal'] = (str,50)
 schema['telstat'] = (str,50)
 dt = np.dtype(schema)
 calstr = np.zeros(len(temp),dtype=dt)
 calstr['chipindx'] = -1
 for n in temp.dtype.names: calstr[n]=temp[n]
 # Add WCSCAL and TELSTAT information
 coords = fits.getdata(basedir+'lists/allcoords.fits',1)
 fluxfile = calstr['file']
 fluxfile = fluxfile.replace('/net','')
 ind1,ind2 = dln.match(fluxfile,coords['file'])
 calstr['wcscal'][ind1] = coords['wcscal'][ind2]    # Failed (3153), Poor (14), Successful (308190)
 calstr['telstat'][ind1] = coords['telstat'][ind2]  # NAN (68188), Not (1222), Track (241826), UNKNOWN (116), Unknown (5)
 # the 2054 failed exposures did not match b/c no fluxfile info
 # Only want exposures with successful SE processing
 gd,ncalstr = dln.where(calstr['success']==1)
 calstr = calstr[gd]
 si = np.argsort(calstr['expdir'])
 calstr = calstr[si]
 chstr = fits.getdata(basedir+'lists/nsc_instcal_calibrate.fits',2)
 nchstr = len(chstr)
 # Get indices for CHSTR
 siexp = np.argsort(chstr['expdir'])
 chstr = chstr[siexp]
 expdir = chstr['expdir']
 brklo,nbrk = dln.where(expdir != np.roll(expdir,1))
Code Example #14
def fix_pms(pix):
    """ Correct the proper motions in the healpix object catalog."""

    t00 = time.time()
    hostname = socket.gethostname()
    host = hostname.split('.')[0]

    version = 'v3'
    nside = 128
    radeg = np.float64(180.00) / np.pi

    hdir = '/net/dl2/dnidever/nsc/instcal/'+version+'/combine/'+str(int(pix)//1000)+'/'
    objfile = hdir+str(pix)+'.fits.gz'
    outfile = hdir+str(pix)+'_pmcorr.fits'
    
    print('Correcting proper motions for '+str(pix))

    # Check that the object file exists
    if not os.path.exists(objfile):
        print(objfile+' NOT FOUND')
        return

    # Check fixed file  
    if os.path.exists(outfile+'.gz'):
        print(str(pix)+' already fixed')
        return

    # Load the object file
    #meta = fits.getdata(objfile,1)
    #obj = fits.getdata(objfile,2)
    meta = Table.read(objfile,1)
    obj = Table.read(objfile,2)
    nobj = len(obj)
    print(str(nobj)+' objects with '+str(np.sum(obj['ndet']))+' measurements')
    #print('KLUDGE!!! MAKING COPY OF OBJ!!!')
    #orig = obj.copy()

    #v = psutil.virtual_memory()
    #process = psutil.Process(os.getpid())
    #print('%6.1f Percent of memory used. %6.1f GB available.  Process is using %6.2f GB of memory.' % (v.percent,v.available/1e9,process.memory_info()[0]/1e9))

    # Break up into subregions
    totmeas = np.sum(obj['ndet'])
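    # Choose how many subpixels (1, 4, 16 or 64) are needed so that each one
    # holds roughly 500k measurements; hinside is the corresponding finer
    # HEALPix resolution (128, 256, 512 or 1024)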
    nsub,bestind = dln.closest([1,4,16,64],int(np.ceil(totmeas/500000)))
    hinside = [128,256,512,1024][bestind]
    vecbound = hp.boundaries(nside,int(pix))
    allpix = hp.query_polygon(hinside,np.transpose(vecbound))
    allra,alldec = hp.pix2ang(hinside,allpix,lonlat=True)
    print(str(nsub)+' sub regions')

    # Get the objects within this subpixel
    objpix = hp.ang2pix(hinside,obj['ra'],obj['dec'],lonlat=True)

    ndet = np.zeros(nobj,int)
    #allpmra_old = np.zeros(nobj,float)
    #allpmdec_old = np.zeros(nobj,float)
    #allpmra_linefit = np.zeros(nobj,float)

    # Loop over subpixels
    for i in range(nsub):
        pix1 = allpix[i]
        print(str(i+1)+' '+str(pix1))

        # Get the measurements
        meas = get_meas(pix1,nside=hinside)
        nmeas = len(meas)
        if nmeas==0:
            print('No measurements in this subregion')
            continue

        #v = psutil.virtual_memory()
        #process = psutil.Process(os.getpid())
        #print('%6.1f Percent of memory used. %6.1f GB available.  Process is using %6.2f GB of memory.' % (v.percent,v.available/1e9,process.memory_info()[0]/1e9))

        # Get the objects within this subpixel
        objind, = np.where(objpix==pix1)
        obj1 = obj[objind]
        nobj1 = len(obj1)
        print('  '+str(nobj1)+' objects in this subregion')

        idindex = dln.create_index(meas['objectid'])
        ## Not all matched
        #if len(idindex['value']) != nobj:
        #    print('Number of unique OBJECTIDs in object and meas catalogs do not match')
        #    return
        ind1,ind2 = dln.match(obj1['objectid'],idindex['value'])
        # Not all matched
        if len(ind1) != nobj1:
            print(str(len(obj1))+' objects in this sub healpix but only measurements for '+str(len(ind1)))
            #print('Some objects are missing measurements')
            #return
        # Ensure they are arrays
        ind1 = np.atleast_1d(ind1)
        ind2 = np.atleast_1d(ind2)
        # sort by object index
        si = np.argsort(ind1)
        if len(ind1)>1:
            ind1 = ind1[si]
            ind2 = ind2[si]

        # Loop over
        ndet1 = np.zeros(nobj1,int)
        #allpmra_old1 = np.zeros(nobj1,float)
        #allpmdec_old1 = np.zeros(nobj1,float)
        #allpmra_linefit1 = np.zeros(nobj1,float)
        for j in range(len(ind1)):
            if (j % 1000)==0: print('  '+str(j))
            k = ind1[j]  # object index
            # Calculate the proper motions
            mind = idindex['index'][idindex['lo'][ind2[j]]:idindex['hi'][ind2[j]]+1]
            cat1 = meas[mind]
            ncat1 = len(cat1)
            ndet1[k] = ncat1
            if ncat1>1:
                raerr = np.array(cat1['raerr']*1e3,np.float64)    # milli arcsec
                ra = np.array(cat1['ra'],np.float64)
                ra -= np.mean(ra)
                ra *= 3600*1e3 * np.cos(obj1['dec'][k]/radeg)     # convert to true angle, milli arcsec
                t = cat1['mjd'].copy()
                t -= np.mean(t)
                t /= 365.2425                          # convert to year
                # Calculate robust slope
                try:
                    pmra, pmraerr = dln.robust_slope(t,ra,raerr,reweight=True)
                    #pmra_old, pmraerr_old = dln.robust_slope_old(t,ra,raerr,reweight=True)
                    #pmra_linefit = dln.poly_fit(t,ra,2,robust=True,sigma=raerr,initpar=pmra)
                except:
                    print('problem')
                    import pdb; pdb.set_trace()
                obj1['pmra'][k] = pmra                 # mas/yr
                obj1['pmraerr'][k] = pmraerr           # mas/yr
                #allpmra_old1[k] = pmra_old
                #allpmra_linefit1[k] = pmra_linefit

                decerr = np.array(cat1['decerr']*1e3,np.float64)   # milli arcsec
                dec = np.array(cat1['dec'],np.float64)
                dec -= np.mean(dec)
                dec *= 3600*1e3                         # convert to milli arcsec
                # Calculate robust slope
                try:
                    pmdec, pmdecerr = dln.robust_slope(t,dec,decerr,reweight=True)
                    #pmdec_old, pmdecerr_old = dln.robust_slope_old(t,dec,decerr,reweight=True)
                except:
                    print('problem')
                    import pdb; pdb.set_trace()
                obj1['pmdec'][k] = pmdec               # mas/yr
                obj1['pmdecerr'][k] = pmdecerr         # mas/yr
                #allpmdec_old1[k] = pmdec_old

        # Stuff subregion object back into big one
        obj[objind] = obj1
        ndet[objind] = ndet1
        #allpmra_old[objind] = allpmra_old1
        #allpmdec_old[objind] = allpmdec_old1
        #allpmra_linefit[objind] = allpmra_linefit1

        #import pdb; pdb.set_trace()

    #np.save(hdir+str(pix)+'_pmraold.npy',allpmra_old)
    #np.save(hdir+str(pix)+'_pmdecold.npy',allpmdec_old)
    #np.save(hdir+str(pix)+'_pmralinefit.npy',allpmra_linefit)

    #import pdb; pdb.set_trace()


    # Save the new version of obj
    # Write the output file
    print('Writing combined catalog to '+outfile)
    if os.path.exists(outfile): os.remove(outfile)
    #Table(meta).write(outfile)               # first, summary table
    meta.write(outfile)               # first, summary table
    #  append other fits binary tables
    hdulist = fits.open(outfile)
    #hdu = fits.table_to_hdu(Table(obj))        # second, catalog
    hdu = fits.table_to_hdu(obj)        # second, catalog
    hdulist.append(hdu)
    hdulist.writeto(outfile,overwrite=True)
    hdulist.close()
    if os.path.exists(outfile+'.gz'): os.remove(outfile+'.gz')
    ret = subprocess.call(['gzip',outfile])    # compress final catalog

    print('dt = %6.1f sec.' % (time.time()-t00))
Code Example #15
def apercorr(psf,image,objects,psfobj,verbose=False):
    """
    Calculate aperture correction.

    Parameters
    ----------
    psf : PSF object
       The best-fitting PSF model.
    image : string or CCDData object
      The input image to fit.  This can be the filename or CCDData object.
    objects : table
       The output table of best-fit PSF values for all of the sources.
    psfobj : table
       The table of PSF objects.
    verbose : boolean, optional
      Verbose output to the screen.  Default is False.

    Returns
    -------
    objects : table
       The output table with an "apcorr" column inserted and the aperture correction
         applied to "psfmag".
    apcor : float
       The aperture correction in mag.
    cgrow : numpy array
       The cumulative aperture correction array.

    Example
    -------

    apcor = apercorr(psf,image,objects,psfobj)

    """

    # Get model of all stars except the PSF stars
    ind1,ind2 = dln.match(objects['id'],psfobj['id'])
    left = np.delete(np.arange(len(objects)),ind1)
    neiobj = objects[left]
    neimodel = image.copy()
    neimodel.data *= 0
    neimodel.error[:] = 1
    neimodelim = psf.add(neimodel,neiobj)
    neimodel.data = neimodelim
    
    # Subtract everything except the PSF stars from the image
    resid = image.copy()
    if image.mask is not None:
        resid.data[~resid.mask] -= neimodel.data[~resid.mask]
    else:
        resid.data -= neimodel.data
    residim = np.maximum(resid.data-resid.sky,0)
    resid.data = residim
    resid.sky[:] = 0.0
    
    # Do aperture photometry with lots of apertures on the PSF
    #  stars
    # Geometric series of 12 radii from 3 to 20 pixels:
    #   r_k = (20/3.)**(1/11.) * r_(k-1)  for k=2,...,12
    rseeing = psf.fwhm()*0.5
    apers = np.cumprod(np.hstack((3.0,np.ones(11,float)*(20/3.)**(1/11.))))
    #apers = np.array([3.0,3.7965,4.8046,6.0803,7.6947,9.7377,12.3232,
    #                  15.5952,19.7360,24.9762,31.6077,40.0000])
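    # (check: apers[0] = 3.0 and apers[-1] = 3.0*((20/3.)**(1/11.))**11 = 20.0 pixels)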


    apercat = aperphot(resid,psfobj,apers)   
    
    # Fit curve of growth
    # use magnitude differences between successive apertures.
    apars, agrow, derr = fitgrowth(apercat,apers,rseeing=rseeing)

    # Get magnitude difference errors (recomputed per star; this overwrites
    #  the derr returned by fitgrowth)
    nstars = len(apercat)
    napers = len(apers)
    derr = np.zeros((nstars,napers-1),float)
    for i in range(len(apers)-1):
        err1 = apercat['magerr_aper'+str(i+1)]
        err2 = apercat['magerr_aper'+str(i+2)]
        derr[:,i] = np.sqrt(err1**2+err2**2)
    wt = 1/derr**2

    
    # NOTE: the fitted curve can turn over at large radius, which a growth
    # curve should never do; this is a known issue to investigate.

    
    # Calculate empirical growth curve
    egrow,egrowerr = empgrowth(apercat,apers)
    
    # Get "adopted" growth curve by taking the weighted average
    # of the analytical and empirical growth curves
    # with the empirical weighted higher at small r and
    # the analytical weighted higher at large r
    gwt = np.mean(wt,axis=0)   # mean weights over the stars
    adopgrow = (egrow*gwt + agrow*(1/(0.1*agrow))**2) / (gwt+(1/(0.1*agrow))**2)
    adopgrowerr =  1 / (gwt+(1/(0.1*agrow))**2)
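    # (the analytic curve is effectively given a 10% fractional uncertainty,
    #  0.1*agrow, which is where the (1/(0.1*agrow))**2 weights come from)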

    # Adopted cumulative growth curve
    # sum from the outside in, with an outer tail given by
    # extrapolation of the analytic model to 2*outer aperture
    cadopgrow = np.cumsum(adopgrow[::-1])[::-1]
    # add extrapolation from rlast to 2*rlast
    tail = diffprofile([2*apers[-1],apers[-1]],*apars)
    cadopgrow += tail
    cadopgrow = np.hstack((cadopgrow,tail))  # add value for outer aperture
    cadopgrowerr = np.hstack((adopgrowerr,0.0))
    
    # Calculate "total" magnitude for the PSF stars
    totmag,toterr = totphot(apercat,apers,cadopgrow,cadopgrowerr)
    
    # Calculate median offset between total and PSF magnitude
    # psf - total
    ind1,ind2 = dln.match(objects['id'],psfobj['id'])
    diffmag = objects['psfmag'][ind1] - totmag[ind2]
    apcor = np.median(diffmag)   # positive value
    
    # Apply the aperture correction to the data:
    #  add an "apcorr" column and keep the initial mags in "inst_psfmag"
    objects['apcorr'] = apcor
    objects['inst_psfmag'] = objects['psfmag']
    objects['psfmag'] -= apcor    # make brighter

    if verbose:
        print('Aperture correction = %.3f mag' % apcor)
    
    return objects, apcor, cadopgrow
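Most of apercorr reduces to the cumulative growth curve: the adopted per-aperture magnitude differences are summed from the outside in, an analytic tail accounts for light beyond the outermost aperture, and the median PSF-minus-total offset becomes the correction. A minimal numpy sketch of just that cumulative step, with the tail value assumed given (standing in for the diffprofile extrapolation):

import numpy as np

def cumulative_growth(adopgrow, tail):
    # Sum the per-aperture mag differences from the outside in, add the
    # analytic tail, then append the tail as the outermost-aperture value.
    cgrow = np.cumsum(adopgrow[::-1])[::-1] + tail
    return np.hstack((cgrow, tail))

# Toy usage: the differences shrink with radius, as a growth curve should
print(cumulative_growth(np.array([0.30, 0.12, 0.05, 0.02]), tail=0.01))
# [0.5  0.2  0.08 0.03 0.01]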
Code Example #16
def get_missingids(exposure):
    """ Get the number of missing IDs from the log files."""

    t00 = time.time()
    hostname = socket.gethostname()
    host = hostname.split('.')[0]

    iddir = '/data0/dnidever/nsc/instcal/v3/idstr/'
    version = 'v3'

    # Load the exposures table
    print('Loading exposure table')
    expcat = fits.getdata(
        '/net/dl2/dnidever/nsc/instcal/' + version +
        '/lists/nsc_v3_exposure_table.fits.gz', 1)

    # Make sure it's a list
    if type(exposure) is str: exposure = [exposure]

    # Match exposures to the exposure catalog
    #  (the loop below indexes eind1 by exposure number, so every input
    #   exposure is assumed to be present in the table)
    eind1, eind2 = dln.match(expcat['EXPOSURE'], exposure)

    nexp = len(exposure)
    outstr = np.zeros(
        nexp,
        np.dtype([('exposure', (np.str_, 100)), ('mtime', np.float64),
                  ('nmeas', int), ('nids', int), ('nmatches', int),
                  ('nduplicates', int), ('nmissing', int)]))
    outstr['mtime'] = -1
    outstr['nmeas'] = -1
    outstr['nmatches'] = -1
    outstr['nids'] = -1
    outstr['nduplicates'] = -1
    outstr['nmissing'] = -1

    # Loop over files
    for i in range(nexp):
        t0 = time.time()
        exp = expcat['EXPOSURE'][eind1[i]]
        print(str(i + 1) + ' ' + exp)
        outstr['exposure'][i] = exp

        instcode = expcat['INSTRUMENT'][eind1[i]]
        dateobs = expcat['DATEOBS'][eind1[i]]
        night = dateobs[0:4] + dateobs[5:7] + dateobs[8:10]
        expdir = '/net/dl2/dnidever/nsc/instcal/' + version + '/' + instcode + '/' + night + '/' + exp
        logfile = glob(expdir + '/' + exp + '_measure_update.????????????.log')
        nlogfile = len(logfile)
        # No logfile
        if nlogfile == 0:
            print('No logfile')
            continue
        # More than one logfile: use the most recent one
        if nlogfile > 1:
            mtime = [os.path.getmtime(f) for f in logfile]
            si = np.argsort(mtime)[::-1]
            logfile = logfile[si[0]]
        else:
            logfile = logfile[0]
        outstr['mtime'][i] = os.path.getmtime(logfile)
        # Read in logfile
        lines = dln.readlines(logfile)
        # Number of measurements
        measind = dln.grep(lines, 'measurements', index=True)
        if len(measind) > 0:
            line1 = lines[measind[0]]
            lo = line1.find(']')
            hi = line1.find('measurements')
            nmeas = int(line1[lo + 3:hi - 1])
            outstr['nmeas'][i] = nmeas
        else:
            nmeas = -1
        # Number in idcat, "IDs for XX measurements"
        idsind = dln.grep(lines, 'IDs for', index=True)
        if len(idsind) > 0:
            line1 = lines[idsind[0]]
            lo = line1.find('for')
            hi = line1.find('measurements')
            nids = int(line1[lo + 4:hi - 1])
            outstr['nids'][i] = nids
        else:
            nids = -1
        # Number of matches
        matchind = dln.grep(lines, 'Matches for', index=True)
        if len(matchind) > 0:
            line1 = lines[matchind[0]]
            lo = line1.find('for')
            hi = line1.find('measurements')
            nmatches = int(line1[lo + 4:hi - 1])
            outstr['nmatches'][i] = nmatches
        else:
            nmatches = -1
        # Check for duplicates
        #if (len(measind)>0) & (len(matchind)>0):
        #    ndup = np.maximum(nmatches-nmeas,0)
        if (len(measind) > 0) & (len(idsind) > 0):
            ndup = np.maximum(nids - nmeas, 0)
            outstr['nduplicates'][i] = ndup
        else:
            ndup = -1
        # Number of missing IDs
        badind = dln.grep(lines, 'WARNING:', index=True)
        nbadind = len(badind)
        if nbadind > 0:
            # Some missing objectids
            badline = lines[badind[0]]
            lo = badline.find('WARNING:')
            hi = badline.find('measurements')
            nmissing = int(badline[lo + 9:hi - 1])
        else:
            nmissing = 0
        outstr['nmissing'][i] = nmissing

        print('  Nmeas=' + str(nmeas) + ' Nids=' + str(nids) + ' Nmatches=' +
              str(nmatches) + ' Nduplicates=' + str(ndup) + ' Nmissing=' +
              str(nmissing))

    return outstr
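get_missingids pulls the counts out of free-form log lines with find() and string slicing, which breaks silently if the phrasing shifts. A regex-based sketch of the same extraction (count_before is a hypothetical helper; the 'measurements' phrasing is taken from the code above):

import re

def count_before(lines, keyword='measurements'):
    """Return the first integer immediately preceding `keyword` in any
    of the given log lines, or -1 if no such line is found."""
    pat = re.compile(r'(\d+)\s+' + re.escape(keyword))
    for line in lines:
        m = pat.search(line)
        if m:
            return int(m.group(1))
    return -1

# Toy usage with phrases like the ones parsed above
logs = ['[12:01:03] 52341 measurements', 'Matches for 52330 measurements']
print(count_before(logs))   # 52341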