def query_lsd(querystr, db=None, bounds=None, **kw):
    """Run an LSD query and fetch every matching row.

    Parameters
    ----------
    querystr : str
        LSD query string (SELECT ... FROM ... WHERE ...).
    db : str or DB, optional
        Database path string or an already-open DB instance.
        Defaults to the path in the LSD_DB environment variable.
    bounds : optional
        Query bounds; canonicalized via lsd.bounds.make_canonical
        before fetching.
    **kw
        Extra keyword arguments forwarded to DB.query().

    Returns
    -------
    The rows returned by query.fetch(bounds=...).
    """
    if db is None:
        db = os.environ['LSD_DB']
    # Accept either an open DB instance or a path string.
    dbob = db if isinstance(db, DB) else DB(db)
    if bounds is not None:
        bounds = lsd.bounds.make_canonical(bounds)
    return dbob.query(querystr, **kw).fetch(bounds=bounds)
##################################################################### #to see how floating point errors will be handled #invalid = invalid floating point operations #divide = division by zero np.seterr(invalid='ignore', divide='ignore') # connect to folders with LSD data #keywords (case insensitive) select, from, where, into, as #db = DB('/ssd-raid0/bsesar:/a41233d1/LSD/from_cfa') #db = DB('/ssd-raid0/bsesar/projects/PS1/DVO:/a41233d1/LSD/from_cfa') if os.getenv('HOSTNAME') == 'aida41147': db = DB('/home/bsesar/projects/PS1/DVO:/a41233d1/LSD/from_cfa') else: db = DB('/a41147d1/bsesar/projects/PS1/DVO:/a41233d1/LSD/from_cfa') # define the query query = 's.obj_id as obj_id, t.ra as ra, t.dec as dec, \ t.raErr as raErr, t.decErr as decErr, \ t.nObs as nObs, t.mjd as mjd, \ -2.5*np.log10(s.mean(1)/s.mean_ap(1)) as sg_r, \ -2.5*np.log10(s.mean(2)/s.mean_ap(2)) as sg_i \ FROM dvo as t, ucal_fluxqy(matchedto=t, nmax=1, dmax=1.5) as s \ WHERE (sg_r > 0.3) & (sg_i > 0.3) & \ (sg_r < 1.0) & (sg_i < 1.0)' # table definition class Star(tables.IsDescription):
#!/usr/bin/env python from lsd import DB def row_counter_kernel(qresult): for rows in qresult: yield len(rows) db = DB('db') query = db.query("SELECT obj_id FROM ps1_obj, sdss") total = 0 for subtotal in query.execute([row_counter_kernel]): total += subtotal print "The total number of rows returned by the query '%s' is %d" % (query, total)
# NOTE(review): this chunk starts mid-function — `dtype`, `columns`, `db`,
# `table`, `table_def`, `filenames`, `ra`, `dec`, `is_radians` are defined
# before the visible portion. The indentation below is reconstructed from a
# whitespace-mangled source; confirm against the full file.

# Translate each numpy dtype entry into a (name, type-string) column pair.
for typedesc in dtype.descr:
    name = typedesc[0]
    type = typedesc[1]
    # Drop a leading byte-order marker ('<' little-endian, '>' big-endian).
    if (type[0] == '<') or (type[0] == '>'):
        type = type[1:]
    if len(typedesc) > 2:
        # Prepend the element count for sub-array columns.
        type = '{}'.format(typedesc[2][0]) + type
        #tdescr += (typedesc[2],)
        # bit of a hack, but doesn't work with the more complicated format
        # specification and FITS binary tables don't support multidimensional
        # arrays as columns.
    tdescr = (name, type)
    columns.append(tdescr)

pool = pool2.Pool()
db = DB(db)
with db.transaction():
    # Create the destination table on first use, otherwise open the
    # existing one. Note `table` is rebound from a name to a Table object.
    if not db.table_exists(table):
        table = db.create_table(table, table_def)
    else:
        table = db.table(table)
    # Import all files in parallel; completion order is not guaranteed.
    for fn, num in pool.imap_unordered(filenames, import_file, (table, ra, dec, is_radians)):
        print 'Imported file {:s} containing {:d} entries.'.format(fn, num)

def main():
    # NOTE(review): truncated at the chunk boundary — more arguments and the
    # body continue past the visible portion.
    parser = argparse.ArgumentParser(description='Import FITS files to LSD')
    parser.add_argument('--db', '-d', default=os.environ['LSD_DB'])
    parser.add_argument('--ra', default='ra', help='Column in FITS file to rename "ra"')
    parser.add_argument('--dec', default='dec', help='Column in FITS file to rename "dec"')
import sys import os #http://research.majuric.org/trac/wiki/LargeSurveyDatabase from lsd import DB from lsd import bounds as lsdbounds import sqlite3 #to see how floating point errors will be handled #invalid = invalid floating point operations #divide = division by zero np.seterr(invalid='ignore', divide='ignore') # connect to folders with LSD data db = DB('/home/bsesar:/a41233d1/LSD/from_cfa') # define the query #keywords (case insensitive) select, from, where, into, as query = 's.obj_id as obj_id, t.ra as ra, t.dec as dec, \ t.raErr as raErr, t.decErr as decErr, \ t.nObs as nObs, t.mjd as mjd, \ -2.5*np.log10(s.mean(1)/s.mean_ap(1)) as sg_r, \ -2.5*np.log10(s.mean(2)/s.mean_ap(2)) as sg_i \ FROM dvo as t, ucal_fluxqy(matchedto=t, nmax=1, dmax=1.5) as s \ WHERE (sg_r > 0.3) & (sg_i > 0.3) & \ (sg_r < 1.0) & (sg_i < 1.0)' #table definition #TOCHECK - class Star(IsDescription): -- do we need "tables." everywhere? class Star(tables.IsDescription):
def main():
    """Read a galactic-coordinate rectangle (l1, l2, b1, b2) from sys.argv,
    query PS1 detections (with an outer AllWISE cross-match) inside it, and
    run the `makelc` kernel over the result one static cell at a time.

    NOTE(review): reconstructed from whitespace-mangled source and possibly
    truncated at the end of the chunk — confirm against the full file.
    """
    import sys
    # Ignore invalid floating point operations (NaNs propagate silently).
    numpy.seterr(invalid='ignore')
    db = lsd.DB(os.environ['LSD_DB'])
    # Rectangle corners in galactic coordinates, from the command line.
    l1=float(sys.argv[1])
    l2=float(sys.argv[2])
    b1=float(sys.argv[3])
    b2=float(sys.argv[4])
    print 'l1 ', l1
    print 'l2 ', l2
    print 'b1 ', b1
    print 'b2 ', b2
    from lsd import bounds
    numpy.seterr(invalid='ignore')
    # Ubercal flat-field solution (with chip mask) that the kernel applies.
    usol = ubercal_flat_bigflat_chmask.read_flat_solution('/home/bsesar/usr/python/ubercal/ucalqy_bigflat_chmask.fits', chmask=True)
    #Regarding WISE, use this as your LSD_DB
    #export
    #LSD_DB=/mnt/fhgfs/schlafly/single-epoch-mirror/db3:/mnt/fhgfs/mazzucchelli/LSD_externals/:/home/bsesar/projects/DB
    #query2 = dbob.query(
    #'select psf_inst_mag, psf_inst_mag_sig, ps1_obj.obj_id as obj_id, '+
    #'ps1_obj.ra as ra, ps1_obj.dec as dec, ps1_det.ra as detra, ps1_det.dec as detdec, '
    #'filterid, '+
    #'mjd_obs, x_psf, y_psf, '+
    #'chip_id, ap_mag, psf_qf, psf_qf_perfect, flags, flags2, '+
    #'w1mpro, w2mpro, w1sigmpro, w2sigmpro, allwise.ra as wisera, allwise.dec as wisedec, '+
    #'allwise.sigra as sigrawise, allwise.sigdec as sigdecwise '+
    #'from ps1_obj, ps1_det, ps1_exp, allwise(outer,matchedto=ps1_obj,dmax=2, nmax=1) '+
    #'where '+
    #'(psf_qf > 0.9) & ((flags & badflags) == 0) & '+
    #'numpy.isfinite(ap_mag) & (ap_mag < 0) & (ap_mag > -25.) & '+
    #'numpy.isfinite(psf_inst_mag) & (psf_inst_mag < 0) & '+
    #'(psf_inst_mag > -25.)',locals={'badflags':badflags})
    ##############
    db = os.environ['LSD_DB']
    # Accept either an open DB instance or a path string (here `db` is
    # always a path, so the isinstance branch always constructs a DB).
    if not isinstance(db, DB):
        dbob = DB(db)
    else:
        dbob = db
    # Active query: per-detection photometry, shape moments, quality flags,
    # plus an outer AllWISE match (w1/w2 magnitudes and positions).
    query2 = dbob.query(
        'select obj_id, (psf_inst_mag-ap_mag) as sg, psf_inst_mag, psf_inst_mag_sig, '+
        'o.ra as ra, o.dec as dec, d.ra as detra, d.dec as detdec, equgal(ra, dec), SFD.EBV(l, b) as ebv, '
        'filterid, x_psf, y_psf, '+
        'chip_id, mjd_obs, airmass, pltscale, '+
        'moments_xx, moments_xy, moments_yy, moments_m3c, moments_m3s, moments_m4c, moments_m4s, moments_r1, moments_rh, '+
        'sky_sigma, (psf_chisq/psf_ndof) as psf_chi2pdf, ext_nsigma, psf_major, psf_minor, psf_theta, psf_qf, '+
        'psf_qf_perfect, psf_fwhm_maj, psf_fwhm_min, psf_core, psf_npix, flags, flags2, sky_limit_rad, sky_limit_flux, ' +
        'w1mpro, w2mpro, w1sigmpro, w2sigmpro, allwise.ra as wisera, allwise.dec as wisedec, '+
        'allwise.sigra as sigrawise, allwise.sigdec as sigdecwise '+
        'from ucal_fluxqz as s, ps1_det as d, ps1_exp as e, ps1_obj(matchedto=s, nmax=1, dmax=1.5) as o, '+
        'allwise(outer,matchedto=o,dmax=2, nmax=1) '+
        'where numpy.isfinite(ap_mag) & (ap_mag < 0) & (ap_mag > -25.) & (psf_inst_mag_sig < 0.3) & '+
        'numpy.isfinite(psf_inst_mag) & (psf_inst_mag < 0) & '+
        '(psf_inst_mag > -25.)')
    # Run the light-curve kernel per static cell, bounded to the rectangle.
    out=query2.execute([(makelc, usol, l1, l2, b1, b2)], group_by_static_cell=True, bounds=lsd.bounds.make_canonical(bounds.rectangle(l1, b1, l2, b2, coordsys='gal')))
    # Materialize the result generator.
    out = [o for o in out]
import os def mapper(qresult, bins): for rows in qresult: counts, _ = np.histogram(rows['dec'], bins) for (bin, count) in zip(bins, counts): if count != 0: yield (bin, count) def reducer(kv): bin, counts = kv yield (bin, sum(counts)) db = DB(os.environ['LSD_DB']) query = db.query("SELECT dec FROM sdss") ddec = 10. bins = np.arange(-90, 90.0001, ddec) hist = {} for (bin, count) in query.execute([(mapper, bins), reducer]): hist[bin + ddec / 2] = count for binctr in sorted(hist.keys()): print "%+05.1f %10d" % (binctr, hist[binctr]) print "Total number of objects:", sum(hist.values())
#!/usr/bin/env python from lsd import DB import numpy as np def mapper(qresult, bins): for rows in qresult: counts, _ = np.histogram(rows['dec'], bins) for (bin, count) in zip(bins, counts): if count != 0: yield (bin, count) def reducer(kv): bin, counts = kv yield (bin, sum(counts)) db = DB('db') query = db.query("SELECT dec FROM sdss") ddec = 10. bins = np.arange(-90, 90.0001, ddec) hist = {} for (bin, count) in query.execute([(mapper, bins), reducer]): hist[bin + ddec/2] = count for binctr in sorted(hist.keys()): print "%+05.1f %10d" % (binctr, hist[binctr]) print "Total number of objects:", sum(hist.values())