Example #1
    def __init__(self, trigger_id):
        self.connection = ea.connect(DATABASE)
        self.cursor = self.connection.cursor()

        dire = './processing/'+trigger_id+'/'
        if not os.path.exists(dire):
            os.makedirs(dire)
Example #2
    def _make_conn(self):
        sources = self.get_sources()
        if sources is not None:
            # share connection with the sources
            conn = sources.get_conn()
        else:
            import easyaccess as ea
            conn = ea.connect(section='desoper')

        self._conn = conn
Example #3
    def get_paths(self, expnum, ccd, tag='Y3A1_FINALCUT'):
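        # Queries the archive for file paths matching expnum/ccd/tag and
        # appends the full download URLs to self.links.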

        try:
            # ccd may be a list of CCD numbers; flatten it to a comma-separated string
            ccd = ','.join(map(str, ccd))
        except TypeError:
            # ccd is already a single value
            pass
        inputs = dict(expnum=expnum, ccd=ccd, tag=tag)
        self.base_query = self.base_query.format(**inputs)
        print(self.base_query)
        con = easyaccess.connect(self._db, user=self.user, passwd=self._passwd)
        self.data = con.query_to_pandas(self.base_query)
        print(self.data)
        for j in range(len(self.data)):
            self.links.append(self.root_url + self.data.PATH.iloc[j])
Example #4
def run(args):
    usernm = args.usernm
    xs = float(args.xsize)
    ys = float(args.ysize)
    colors = args.colors.split(',')
    jobid = ''
    outdir = ''

    if rank == 0:
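        # only the root MPI rank builds the job and queries the database;
        # the resulting dataframe chunks are scattered to all ranks below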
        jobid = str(uuid.uuid4())
        outdir = usernm + '/' + jobid + '/'

        if args.db == 'DR1':
            db = 'desdr'
        elif args.db == 'Y3A2':
            db = 'dessci'

        # This puts any input type into a pandas dataframe
        if args.csv:
            userdf = pd.DataFrame(pd.read_csv(args.csv))
        elif args.ra:
            coords = {}
            coords['RA'] = args.ra
            coords['DEC'] = args.dec
            userdf = pd.DataFrame.from_dict(coords, orient='columns')
        elif args.coadd:
            coadds = {}
            coadds['COADD_OBJECT_ID'] = args.coadd
            userdf = pd.DataFrame.from_dict(coadds, orient='columns')

        df = pd.DataFrame()
        unmatched_coords = {'RA': [], 'DEC': []}
        unmatched_ids = []

        conn = ea.connect(db)
        curs = conn.cursor()

        if 'RA' in userdf:
            for i in range(len(userdf)):
                ra = userdf['RA'][i]
                ra180 = ra
                if ra > 180:
                    ra180 = 360 - ra
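                # ra180 feeds the CROSSRA0='Y' branch of the query, which
                # matches tiles crossing RA=0 against RACMIN-360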

                if args.db == 'DR1':
                    query = "select * from (select T.TILENAME, M.ALPHAWIN_J2000, M.DELTAWIN_J2000, M.RA, M.DEC from DR1_TILE_INFO T, DR1_MAIN M where M.TILENAME = T.TILENAME and (CROSSRA0='N' and ({0} between RACMIN and RACMAX) and ({1} between DECCMIN and DECCMAX)) or (CROSSRA0='Y' and ({2} between RACMIN-360 and RACMAX) and ({1} between DECCMIN and DECCMAX))) where rownum=1".format(
                        ra, userdf['DEC'][i], ra180)
                #elif args.db.upper == 'Y3A2':

                f = conn.query_to_pandas(query)
                if f.empty:
                    unmatched_coords['RA'].append(userdf['RA'][i])
                    unmatched_coords['DEC'].append(userdf['DEC'][i])
                else:
                    df = df.append(f)

        if 'COADD_OBJECT_ID' in userdf:
            for i in range(len(userdf)):
                if args.db == 'DR1':
                    query = "select COADD_OBJECT_ID, ALPHAWIN_J2000, DELTAWIN_J2000, RA, DEC, TILENAME from DR1_MAIN where COADD_OBJECT_ID={0}".format(
                        userdf['COADD_OBJECT_ID'][i])
                #elif args.db.upper == 'Y3A2':

                f = conn.query_to_pandas(query)
                if f.empty:
                    unmatched_ids.append(userdf['COADD_OBJECT_ID'][i])
                else:
                    df = df.append(f)

        conn.close()

        df = df.sort_values(by=['TILENAME'])

        chunksize = int(df.shape[0] / nprocs) + (df.shape[0] % nprocs)
        df = [df[i:i + chunksize] for i in range(0, df.shape[0], chunksize)]

    else:
        df = None

    jobid = comm.bcast(jobid, root=0)
    outdir = usernm + '/' + jobid + '/'
    df = comm.scatter(df, root=0)
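    # every rank receives its own chunk of the dataframe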

    tilenm = df['TILENAME'].unique()
    for i in tilenm:
        tiledir = 'tiles_sample/' + i + '/'
        udf = df[df.TILENAME == i]
        udf = udf.reset_index()

        size = u.Quantity((ys, xs), u.arcmin)
        positions = SkyCoord(udf['ALPHAWIN_J2000'],
                             udf['DELTAWIN_J2000'],
                             frame='icrs',
                             unit='deg',
                             equinox='J2000',
                             representation_type='spherical')

        if args.make_tiffs or args.make_pngs:
            # 20180823 - I can't think of a good reason for this to NOT be included within the function above, so, merge?
            imgname = glob.glob(tiledir + '*.tiff')
            try:
                im = Image.open(imgname[0])
            except IOError as e:
                print('No TIFF file found for tile ' + str(i) +
                      '. Will not create true-color cutout.')
            else:
                MakeTiffCut(tiledir, outdir, im, positions, xs, ys, udf,
                            args.make_tiffs, args.make_pngs)

        if args.make_fits:
            MakeFitsCut(tiledir, outdir, size, positions, colors, udf)
Example #5
import os

import easyaccess as ea
from astropy.table import Table, join, Column

#directory to store outputs
data_dir = '/Users/Christina/DES/data/'

#format of balrog tables in db
table_format = "JELENA.BALROG_SVA1_TAB{}_{}_{}"

table_nums = [str(r).zfill(2) for r in range(3, 12)]
#09 is not valid
table_nums.remove('09')

bands = ['G', 'R', 'I', 'Z']
tables = ['TRUTH']  #'SIM']#, 'NOSIM']

connection = ea.connect()
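# loop over the table types and numbers below, writing one FITS file per table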

for table in tables:

    for table_num in table_nums:
        if table == 'SIM':
            out_file = data_dir + 'balrog_sva1_tab{}_SIM_flux_radii.fits'.format(
                table_num)
        elif table == 'TRUTH':
            out_file = data_dir + 'balrog_sva1_tab{}_TRUTH_fluxes.fits'.format(
                table_num)

        if os.path.exists(out_file):
            print "Output path {} already exists. Delete before running again. Moving to next table.".format(
                out_file)
            continue
Example #6
def db_connect(section='desoper'):
    db = ea.connect(section=section)
    cursor = db.cursor()
    return cursor
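
# Usage sketch: db_connect() returns a raw DB cursor, e.g.
#   cur = db_connect()
#   cur.execute('select 1 from dual')  # dual is Oracle's built-in dummy table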
Example #7
def run(args):
    start = time.time()

    usernm = args.usernm
    xs = float(args.xsize)
    ys = float(args.ysize)
    colors = args.colors.split(',')
    jobid = str(uuid.uuid4())
    outdir = args.usernm + '/' + jobid + '/'
    maketiff = args.make_tiff
    makepngs = args.make_pngs
    makefits = args.make_fits

    if args.db == 'DR1':
        db = 'desdr'
    elif args.db == 'Y3A2':
        db = 'dessci'

    userdf = pd.DataFrame(pd.read_csv('des0210-1624_100_sample.csv'))
    #userdf = pd.DataFrame(pd.read_csv('des0210-1624_100_sample_no_tile_borders.csv'))
    userdf.drop(
        columns=['ALPHAWIN_J2000', 'DELTAWIN_J2000', 'RA', 'DEC', 'TILENAME'],
        inplace=True)

    df = pd.DataFrame()
    unmatched_coords = {'RA': [], 'DEC': []}
    unmatched_ids = []

    conn = ea.connect(db)
    curs = conn.cursor()

    if args.ra:
        for i in range(len(userdf)):
            ra = userdf['RA'][i]
            ra180 = ra
            if ra > 180:
                ra180 = 360 - ra
            if args.db == 'DR1':
                query = "select * from (select T.TILENAME, M.ALPAHWIN_J2000, M.DELTAWIN_J2000, M.RA, M.DEC from DR1_TILE_INFO T, DR1_MAIN M where M.TILENAME = T.TILENAME and (CROSSRA0='N' and ({0} between RACMIN and RACMAX) and ({1} between DECCMIN and DECCMAX)) or (CROSSRA0='Y' and ({2} between RACMIN-360 and RACMAX) and ({1} between DECCMIN and DECCMAX))) where rownum=1".format(
                    ra, userdf['DEC'][i], ra180)
            #elif args.db.upper == 'Y3A2':
            #
            f = conn.query_to_pandas(query)
            if f.empty:
                unmatched_coords['RA'].append(userdf['RA'][i])
                unmatched_coords['DEC'].append(userdf['DEC'][i])
            else:
                df = df.append(f)
    #if args.coadd:
    if 'COADD_OBJECT_ID' in userdf:
        for i in range(len(userdf)):
            if args.db == 'DR1':
                query = "select COADD_OBJECT_ID, ALPHAWIN_J2000, DELTAWIN_J2000, RA, DEC, TILENAME from DR1_MAIN where COADD_OBJECT_ID={0}".format(
                    userdf['COADD_OBJECT_ID'][i])
            #elif args.db.upper == 'Y3A2':
            #
            f = conn.query_to_pandas(query)
            if f.empty:
                unmatched_ids.append(userdf['COADD_OBJECT_ID'][i])
            else:
                df = df.append(f)

    conn.close()
    df = df.merge(userdf, on='COADD_OBJECT_ID', how='inner')
    #df = df.sort_values(by=['TILENAME'])			# 2018-08-09 - Isn't necessary when slicing dataframe by tilename below.

    dt1 = time.time() - start
    print(
        'It took {} seconds to sort the desired objects by tile.'.format(dt1))

    size = u.Quantity((ys, xs), u.arcmin)
    #positions = SkyCoord(df['ALPHAWIN_J2000'], df['DELTAWIN_J2000'], frame='icrs', unit='deg', equinox='J2000', representation_type='spherical')
    #positions = SkyCoord([32.301142,32.301148], [-16.475058,-16.306904], frame='icrs', unit='deg', equinox='J2000', representation_type='spherical')
    #positions = SkyCoord([32.339727, 32.579913], [-16.486495, -16.402778], frame='icrs', unit='deg', equinox='J2000', representation_type='spherical')

    tilenm = df['TILENAME'].unique()
    for i in tilenm:
        tiledir = i + '/'
        udf = df[df.TILENAME == i]

        positions = SkyCoord(udf['ALPHAWIN_J2000'],
                             udf['DELTAWIN_J2000'],
                             frame='icrs',
                             unit='deg',
                             equinox='J2000',
                             representation_type='spherical')

        if maketiff or makepngs:
            imgname = glob.glob(tiledir + '*.tiff')
            try:
                im = Image.open(imgname[0])
            except IOError as e:
                print('No TIFF file found. Will not create true-color cutout.')
            else:
                MakeTiffCut(tiledir, outdir, im, positions, xs, ys, udf,
                            maketiff, makepngs)

        if args.make_fits:
            MakeFitsCut(tiledir, outdir, size, positions, colors, udf)

    dt2 = time.time() - start - dt1
    print('It took {} seconds to make the cutouts.'.format(dt2))

    end = time.time() - start
    print('It took {} seconds for this program to run.'.format(end))
Example #8
# Produce Truth Table for Fakes #
import os

import easyaccess

# expnums, config, outdir and diffimg are assumed to be defined earlier in the script
explist = ','.join(map(str, expnums))

# the database where diffimg outputs are stored
db = 'destest'
schema = 'marcelle'

# the query you want to run to get the truth table data
query = ('select distinct SNFAKE_ID, EXPNUM, CCDNUM, TRUEMAG, TRUEFLUXCNT, FLUXCNT, '
         'BAND, NITE, MJD from ' + schema + '.SNFAKEIMG '
         'where EXPNUM IN (' + explist + ') order by SNFAKE_ID')

# the file where you want to save the truth table
filename = config.get('GWmakeDataFiles-fake', 'fake_input')
filename = os.path.join(outdir, filename)

connection = easyaccess.connect(db)
connection.query_and_save(query, filename)
connection.close()

### FOR THE FIRST RUN EXIT HERE TO LEARN NAMES OF VALUES WE NEED ###
print("Data Made")

print("Read Data")

#Make plots Section#
###Plot1 Efficiency Plot ###
###Plot5 Magerror Distribution ###
###Plots should include all bands###

reals = diffimg.DataSet(os.path.join(outdir, outDir_datareal), label='reals')
Example #9
def run(args):
	if rank == 0:
		if args.db == 'DR1':
			db = 'desdr'
		elif args.db == 'Y3A2':
			db = 'dessci'
		
		conn = ea.connect(db)
		curs = conn.cursor()
		
		usernm = str(conn.user)
		if args.jobid:
			jobid = args.jobid
		else:
			jobid = str(uuid.uuid4())
		
		outdir = OUTDIR + usernm + '/' + jobid + '/'
		
		try:
			os.makedirs(outdir, exist_ok=False)
		except OSError as e:
			print(e)
			print('Specified jobid already exists in output directory. Aborting job.')
			conn.close()
			sys.stdout.flush()
			comm.Abort()
	else:
		usernm, jobid, outdir = None, None, None
	
	usernm, jobid, outdir = comm.bcast([usernm, jobid, outdir], root=0)
	
	logtime = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
	#logname = OUTDIR + 'BulkThumbs_' + logtime + '.log'
	logname = outdir + 'BulkThumbs_' + logtime + '.log'
	formatter = logging.Formatter('%(asctime)s - '+str(rank)+' - %(levelname)-8s - %(message)s')
	
	logger = logging.getLogger(__name__)
	logger.setLevel(logging.INFO)
	
	fh = MPILogHandler(logname, comm)
	fh.setLevel(logging.INFO)
	fh.setFormatter(formatter)
	logger.addHandler(fh)
	
	xs = float(args.xsize)
	ys = float(args.ysize)
	colors = args.colors.split(',')
	#usernm = ''
	#jobid = ''
	#outdir = ''
	
	if rank == 0:
		summary = {}
		start = time.time()
		#if args.db == 'DR1':
		#	db = 'desdr'
		#elif args.db == 'Y3A2':
		#	db = 'dessci'
		
		logger.info('Selected Options:')
		
		# This puts any input type into a pandas dataframe
		if args.csv:
			userdf = pd.DataFrame(pd.read_csv(args.csv))
			logger.info('    CSV: '+args.csv)
			summary['csv'] = args.csv
		elif args.ra:
			coords = {}
			coords['RA'] = args.ra
			coords['DEC'] = args.dec
			userdf = pd.DataFrame.from_dict(coords, orient='columns')
			logger.info('    RA: '+str(args.ra))
			logger.info('    DEC: '+str(args.dec))
			summary['ra'] = str(args.ra)
			summary['dec'] = str(args.dec)
		elif args.coadd:
			coadds = {}
			coadds['COADD_OBJECT_ID'] = args.coadd
			userdf = pd.DataFrame.from_dict(coadds, orient='columns')
			logger.info('    CoaddID: '+str(args.coadd))
			summary['coadd'] = str(args.coadd)
		
		logger.info('    X size: '+str(args.xsize))
		logger.info('    Y size: '+str(args.ysize))
		logger.info('    Make TIFFs? '+str(args.make_tiffs))
		logger.info('    Make PNGs? '+str(args.make_pngs))
		logger.info('    Make FITS? '+str(args.make_fits))
		logger.info('    Make RGBs? {}'.format('True' if args.make_rgb else 'False'))
		summary['xsize'] = str(args.xsize)
		summary['ysize'] = str(args.ysize)
		summary['make_tiffs'] = str(args.make_tiffs)
		summary['make_pngs'] = str(args.make_pngs)
		summary['make_fits'] = str(args.make_fits)
		summary['make_rgb'] = 'True' if args.make_rgb else 'False'
		if args.make_fits:
			logger.info('        Bands: '+args.colors)
			summary['bands'] = args.colors
		if args.make_rgb:
			logger.info('        Bands: '+str(args.make_rgb))
			summary['rgb_colors'] = args.make_rgb
		summary['db'] = args.db
		
		df = pd.DataFrame()
		unmatched_coords = {'RA':[], 'DEC':[]}
		unmatched_coadds = []
		
		logger.info('User: ' + usernm)
		logger.info('JobID: ' + str(jobid))
		summary['user'] = usernm
		summary['jobid'] = str(jobid)
		
		tablename = 'BTL_'+jobid.upper().replace("-","_")	# "BulkThumbs_List_<jobid>"
		if 'RA' in userdf:
			if args.db == 'Y3A2':
				ra_adjust = [360-userdf['RA'][i] if userdf['RA'][i]>180 else userdf['RA'][i] for i in range(len(userdf['RA']))]
				userdf = userdf.assign(RA_ADJUSTED = ra_adjust)
				userdf.to_csv(OUTDIR+tablename+'.csv', index=False)
				conn.load_table(OUTDIR+tablename+'.csv', name=tablename)
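				# with the coordinate list loaded server-side, the tile match
				# below runs as a single join instead of one query per row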
				
				#query = "select temp.RA, temp.DEC, temp.RA_ADJUSTED, temp.RA as ALPHAWIN_J2000, temp.DEC as DELTAWIN_J2000, m.TILENAME from {} temp left outer join Y3A2_COADDTILE_GEOM m on (m.CROSSRA0='N' and (temp.RA between m.URAMIN and m.URAMAX) and (temp.DEC between m.UDECMIN and m.UDECMAX)) or (m.CROSSRA0='Y' and (temp.RA_ADJUSTED between m.URAMIN-360 and m.URAMAX) and (temp.DEC between m.UDECMIN and m.UDECMAX))".format(tablename)
				query = "select temp.RA, temp.DEC, temp.RA_ADJUSTED, temp.RA as ALPHAWIN_J2000, temp.DEC as DELTAWIN_J2000, m.TILENAME"
				if 'XSIZE' in userdf:
					query += ", temp.XSIZE"
				if 'YSIZE' in userdf:
					query += ", temp.YSIZE"
				query += " from {} temp left outer join Y3A2_COADDTILE_GEOM m on (m.CROSSRA0='N' and (temp.RA between m.URAMIN and m.URAMAX) and (temp.DEC between m.UDECMIN and m.UDECMAX)) or (m.CROSSRA0='Y' and (temp.RA_ADJUSTED between m.URAMIN-360 and m.URAMAX) and (temp.DEC between m.UDECMIN and m.UDECMAX))".format(tablename)
				
				df = conn.query_to_pandas(query)
				curs.execute('drop table {}'.format(tablename))
				os.remove(OUTDIR+tablename+'.csv')
				
				# rows that matched no tile come back with nulls or -9999
				# sentinels; record them as unmatched, then drop them
				df = df.replace('-9999', np.nan)
				df = df.replace(-9999.000000, np.nan)
				dftemp = df[df.isnull().any(axis=1)]
				unmatched_coords['RA'] = dftemp['RA'].tolist()
				unmatched_coords['DEC'] = dftemp['DEC'].tolist()
				df = df.dropna(axis=0, how='any')
			
			if args.db == 'DR1':
				for i in range(len(userdf)):
					ra = userdf['RA'][i]
					ra180 = ra
					if ra > 180:
						ra180 = 360 - ra
						
					query = "select * from (select TILENAME from DR1_TILE_INFO where (CROSSRA0='N' and ({0} between RACMIN and RACMAX) and ({1} between DECCMIN and DECCMAX)) or (CROSSRA0='Y' and ({2} between RACMIN-360 and RACMAX) and ({1} between DECCMIN and DECCMAX))) where rownum=1".format(ra, userdf['DEC'][i], ra180)
					
					f = conn.query_to_pandas(query)
					if f.empty:
						unmatched_coords['RA'].append(userdf['RA'][i])
						unmatched_coords['DEC'].append(userdf['DEC'][i])
					else:	
						df = df.append(f)
			logger.info('Unmatched coordinates: \n{0}\n{1}'.format(unmatched_coords['RA'], unmatched_coords['DEC']))
			summary['Unmatched_Coords'] = unmatched_coords
			print(unmatched_coords)
		
		if 'COADD_OBJECT_ID' in userdf:
			if args.db == 'Y3A2':
				userdf.to_csv(OUTDIR+tablename+'.csv', index=False)
				conn.load_table(OUTDIR+tablename+'.csv', name=tablename)
				
				#query = "select temp.COADD_OBJECT_ID, m.ALPHAWIN_J2000, m.DELTAWIN_J2000, m.RA, m.DEC, m.TILENAME from {} temp left outer join Y3A2_COADD_OBJECT_SUMMARY m on temp.COADD_OBJECT_ID=m.COADD_OBJECT_ID".format(tablename)
				query = "select temp.COADD_OBJECT_ID, m.ALPHAWIN_J2000, m.DELTAWIN_J2000, m.RA, m.DEC, m.TILENAME"
				if 'XSIZE' in userdf:
					query += ", temp.XSIZE"
				if 'YSIZE' in userdf:
					query += ", temp.YSIZE"
				query += " from {} temp left outer join Y3A2_COADD_OBJECT_SUMMARY m on temp.COADD_OBJECT_ID=m.COADD_OBJECT_ID".format(tablename)
				
				df = conn.query_to_pandas(query)
				curs.execute('drop table {}'.format(tablename))
				os.remove(OUTDIR+tablename+'.csv')
				
				df = df.replace('-9999',np.nan)
				df = df.replace(-9999.000000,np.nan)
				dftemp = df[df.isnull().any(axis=1)]
				unmatched_coadds = dftemp['COADD_OBJECT_ID'].tolist()
				df = df.dropna(axis=0, how='any')
			
			if args.db == 'DR1':
				for i in range(len(userdf)):
					query = "select COADD_OBJECT_ID, ALPHAWIN_J2000, DELTAWIN_J2000, RA, DEC, TILENAME from DR1_MAIN where COADD_OBJECT_ID={0}".format(userdf['COADD_OBJECT_ID'][i])
					
					f = conn.query_to_pandas(query)
					if f.empty:
						unmatched_coadds.append(userdf['COADD_OBJECT_ID'][i])
					else:
						df = df.append(f)
			logger.info('Unmatched coadd IDs: \n{}'.format(unmatched_coadds))
			summary['Unmatched_Coadds'] = unmatched_coadds
			print(unmatched_coadds)
		
		conn.close()
		df = df.sort_values(by=['TILENAME'])
		df = df.drop_duplicates(['RA','DEC'], keep='first')
		
		if args.return_list:
			os.makedirs(outdir, exist_ok=True)
			df.to_csv(outdir+tablename+'.csv', index=False)
		
		# split into one sub-dataframe per MPI rank for comm.scatter below
		df = np.array_split(df, nprocs)
		
		end1 = time.time()
		query_elapsed = '{0:.2f}'.format(end1-start)
		print('Querying took (s): ' + query_elapsed)
		logger.info('Querying took (s): ' + query_elapsed)
		summary['query_time'] = query_elapsed
	
	else:
		df = None
	
	df = comm.scatter(df, root=0)
	
	tilenm = df['TILENAME'].unique()
	for i in tilenm:
		tiledir = TILES_FOLDER + i + '/'
		udf = df[ df.TILENAME == i ]
		udf = udf.reset_index()
		
		size = u.Quantity((ys, xs), u.arcmin)
		positions = SkyCoord(udf['ALPHAWIN_J2000'], udf['DELTAWIN_J2000'], frame='icrs', unit='deg', equinox='J2000', representation_type='spherical')
		
		if args.make_tiffs or args.make_pngs:
			MakeTiffCut(tiledir, outdir+i+'/', positions, xs, ys, udf, args.make_tiffs, args.make_pngs)
		
		if args.make_fits:
			MakeFitsCut(tiledir, outdir+i+'/', size, positions, colors, udf)
		
		if args.make_rgb:
			MakeLuptonRGB(tiledir, outdir+i+'/', udf, positions, xs, ys, args.make_rgb, args.rgb_minimum, args.rgb_stretch, args.rgb_asinh)
	
	comm.Barrier()
	
	if rank == 0:
		end2 = time.time()
		processing_time = '{0:.2f}'.format(end2-end1)
		print('Processing took (s): ' + processing_time)
		logger.info('Processing took (s): ' + processing_time)
		summary['processing_time'] = processing_time
		
		dirsize = getPathSize(outdir)
		dirsize = dirsize * 1. / 1024
		if dirsize > 1024. * 1024:
			dirsize = '{0:.2f} GB'.format(1. * dirsize / 1024. / 1024)
		elif dirsize > 1024.:
			dirsize = '{0:.2f} MB'.format(1. * dirsize / 1024.)
		else:
			dirsize = '{0:.2f} KB'.format(dirsize)
		
		logger.info('All processes finished.')
		logger.info('Total file size on disk: {}'.format(dirsize))
		summary['size_on_disk'] = str(dirsize)
		
		files = glob.glob(outdir + '*/*')
		logger.info('Total number of files: {}'.format(len(files)))
		summary['number_of_files'] = len(files)
		files = [i.split('/')[-2:] for i in files]
		files = [('/').join(i) for i in files]
		if 'COADD_OBJECT_ID' in userdf:
			files = [i.split('.')[-2] for i in files]
		else:
			files = [('.').join(i.split('.')[-4:-1]) for i in files]
		files = [i.split('_')[0] for i in files]
		files = list(set(files))
		summary['files'] = files
		
		jsonfile = outdir + 'BulkThumbs_'+logtime+'_SUMMARY.json'
		with open(jsonfile, 'w') as fp:
			json.dump(summary, fp)
Example #10
import sys
import logging

# minimal reconstruction of the truncated logging setup; the level is an assumption
logging.basicConfig(level=logging.INFO,
                    datefmt='%Y-%m-%d %H:%M:%S', stream=sys.stdout)

import easyaccess as ea

if __name__ == "__main__":
    import argparse
    description = "python script"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('infiles',nargs='+')
    parser.add_argument('-t','--table',default='MyTable')
    parser.add_argument('-f','--force',action='store_true')
    parser.add_argument('-s','--section',default='desoper')
    args = parser.parse_args()
    
    #ea.set_color_term(False)
    con = ea.connect(section=args.section,quiet=True)
    con.ct = False
    cur = con.cursor()

    msg = "easyaccess v%s"%ea.__version__
    logging.info(msg)

    exists = con.check_table_exists(args.table)
    if exists and not args.force:
        msg = "Found table %s; skipping..."%args.table
        raise Exception(msg)
    elif exists and args.force:
        con.drop_table(args.table,purge=True)
     
    for i,infile in enumerate(args.infiles):
        msg = '\n' + 30*"-" + '\n'
Example #11
def find_observations(search_exps, cand_df, orbit, year='Y4', nsigma=3):
    '''
    Finds SE transients in the search exposures from the given year that fall inside the error ellipse of the candidate.
    :param search_exps: dataframe of exposures to be searched
    :param cand_df: dataframe of observations of the candidate
    :param orbit: the candidate's corresponding orbit
    :param year: DES observing year of the search exposures, e.g. 'Y4'
    :param nsigma: multiplier for error ellipse dimensions
    :return: dataframe of transients
    '''
    conn_desoper = ea.connect(section='desoper')
    conn_dessci = ea.connect(section='dessci')
    matched_obs = pd.DataFrame()
    transients_inside = pd.DataFrame()
#    search_exps = search_exps[search_exps['expnum']!=374572]  # to exclude particular exposures

    df = pd.DataFrame(columns=['expnum', 'ra_pred', 'dec_pred', 'err_a', 'err_b', 'err_PA'])
    full_df_coadd = pd.DataFrame()

    for ind, exp in search_exps.iterrows():
        print('Searching exposure... ', exp['expnum'], ephem.date(exp['date']), exp['band'], exp['t_eff'], '...', end=' ')
        pos_pred = orbit.predict_pos(exp['date'])
        ra_pred, dec_pred, err_a, err_b, err_PA = pos_pred['ra'], pos_pred['dec'], pos_pred['err']['a'],\
            pos_pred['err']['b'], pos_pred['err']['PA']
        print('RA_PRED, DEC_PRED, ERR_A, ERR_B, PA = ', ra_pred, dec_pred, err_a, err_b, err_PA)
        # floor the error-ellipse semi-axes
        if err_a < 15.0: err_a = 15.0
        if err_b < 15.0: err_b = 15.0
        coadd_obs = get_coadd_cutout(conn_dessci, ra_pred, dec_pred, nsigma * err_a)
        full_df_coadd = full_df_coadd.append(coadd_obs)

        df.loc[ind] = [exp['expnum'], ra_pred, dec_pred, nsigma*err_a, nsigma*err_b, err_PA]

    if year=='Y4':
        se_obs = get_SE_detections(conn_desoper, df)
    else:
        se_obs = get_SE_detections(conn_dessci, df)

    #coadd_obs = get_coadd_cutout(conn_dessci, df)
    transients = get_transient_detections(se_obs, full_df_coadd)

    if len(transients):
        transients['delta_chisq'] = transients.apply(lambda row: getOrbit(cand_df, row).chisq-orbit.chisq,axis=1)
#            transients['mag'] = good_matches.apply(lambda row: mag_calib(row), axis=1)
        transients['mag'] = 99.    # temp workaround
        transients['ml_score']=10.0   # placeholder
        transients['fakeid']=0
        transients.rename(columns={'object_number': 'objid'}, inplace=True)
# These transients fall inside the error ellipse

        transients_inside = transients.loc[transients.apply(lambda row:
                                                            point_in_ellipse(row['ra'],
                                                                             row['dec'],
                                                                             np.degrees(df.loc[df['expnum'] == row['expnum'], 'ra_pred'].iloc[0]),
                                                                             np.degrees(df.loc[df['expnum'] == row['expnum'], 'dec_pred'].iloc[0]),
                                                                             df.loc[df['expnum'] == row['expnum'], 'err_a'].iloc[0],
                                                                             df.loc[df['expnum'] == row['expnum'], 'err_b'].iloc[0],
                                                                             90 - df.loc[df['expnum'] == row['expnum'], 'err_PA'].iloc[0]),
                                                                             axis=1)]

            #transients_inside = transients.loc[transients.apply(lambda row: point_in_ellipse(row['ra'], row['dec'], df), axis=1)]
#            for ind2, row2 in transients_inside.iterrows():
#                print ephem.date(exp['date']), str(ephem.hours(np.radians(row2['ra']))), str(ephem.degrees(np.radians(row2['dec']))), exp['expnum'],exp['band']
        matched_obs = pd.concat([matched_obs, transients_inside])
    return matched_obs
Example #12
import easyaccess as ea
from astropy.table import QTable


def query_function(params):
    con = ea.connect('desdr')
    DF = con.query_to_pandas(query_string(params))
    DF = QTable.from_pandas(DF)
    return DF
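
# Usage sketch: query_string(params) is assumed to build a valid SQL string
# from the params dict; the result comes back as an astropy QTable.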
Example #13
import os
import sys
import pandas as pd
import easyaccess as ea
from tqdm import tqdm

deep = pd.read_hdf(sys.argv[1], key='main')
new_sncand = pd.read_csv(
    '/media/data3/wiseman/des/coadding/catalogs/all_snids.csv')

#new_snids = new_sncand[new_sncand['SNFAKE_ID']==0]['SNID'].values
conn = ea.connect('desoper')
cursor = conn.cursor()
try:
    start = sys.argv[2]
except IndexError:
    # no start row was given on the command line
    start = 0
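# one INSERT per row of the deep catalogue, into the SNGALS_DEEP table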
for i in tqdm(range(int(start), len(deep))):  #len(deep)

    query = (
        "INSERT INTO SNGALS_DEEP "
        "( A_IMAGE, B_IMAGE, CCDNUM, CLASS_STAR_G, CLASS_STAR_I,"
        "CLASS_STAR_R, CLASS_STAR_Z, CXX_IMAGE, CXY_IMAGE, CYY_IMAGE,"
        "DLR, DLR_RANK, ELONGATION, FIELD, FLUXERR_APER_4_G,"
        "FLUXERR_APER_4_I, FLUXERR_APER_4_R, FLUXERR_APER_4_Z,"
        "FLUXERR_AUTO_G, FLUXERR_AUTO_I, FLUXERR_AUTO_R, FLUXERR_AUTO_Z,"
        "FLUX_APER_4_G, FLUX_APER_4_I, FLUX_APER_4_R, FLUX_APER_4_Z,"
        "FLUX_AUTO_G, FLUX_AUTO_I, FLUX_AUTO_R, FLUX_AUTO_Z,"
        "KRON_RADIUS, LIMFLUX_G, LIMFLUX_I, LIMFLUX_R, LIMFLUX_Z,"
        "LIMMAG_G, LIMMAG_I, LIMMAG_R, LIMMAG_Z, MAGERR_APER_4_G,"
        "MAGERR_APER_4_I, MAGERR_APER_4_R, MAGERR_APER_4_Z,"
Example #14
    parser.add_argument('--user',
                        dest='user')
    parser.add_argument('--password',
                        dest='password')
    options = parser.parse_args()
    user = options.user
    password = options.password
    out_directory = options.out
    csv_path = '{0}/httppath_exposure.csv'.format(out_directory)
    if options.job == 'paths':
        print('Downloading CSV of exposures.')
        query = "select path, obstype, band, exposurename, id, expnum, nite, mjd_obs from httppath_exposure where obstype in ('dome flat')"

        try:
            import easyaccess
            connection = easyaccess.connect()
            cursor = connection.cursor()
            #csv = connection.query_to_pandas(query)
            connection.query_and_save(query, csv_path)
            connection.close()

        except Exception:
            # huh. Well, let's try the command line
            # (call is assumed to be subprocess.call, imported earlier in the script)
            command = ['easyaccess', '-c',
                       "{0}; > {1}".format(query, csv_path)]
            call(command)

    elif options.job == 'images':
        print('Using CSV at {0}'.format(csv_path))
        csv = pd.read_csv(csv_path)
        # cut out duplicate filenames
Example #15
class TestInterpreter(unittest.TestCase):

    conf = ea.config_mod.get_config(ea.config_file)
    conf.set('display', 'loading_bar', 'no')
    db = conf.get('easyaccess', 'database')
    desconf = ea.config_mod.get_desconfig(ea.desfile, db)
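    # con drives the easyaccess interpreter under test; con2 is an independent
    # connection used to verify results with plain cursor queries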
    con = ea.easy_or(conf,
                     desconf,
                     db,
                     interactive=False,
                     quiet=True,
                     refresh=False)
    con2 = ea.connect(quiet=True)
    tablename = 'testtable'
    nrows = 10000
    prefetch = 4000
    chunk = 1000
    memsize = 1
    sqlfile = 'temp.sql'
    csvfile = 'temp.csv'
    fitsfile = 'temp.fits'
    h5file = 'temp.h5'

    def test_describe(self):
        print('\n*** test_describe ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        command = 'describe_table %s;' % self.tablename.upper()
        self.con.onecmd(command)
        self.con.drop_table(self.tablename)
        os.remove(self.csvfile)

    def test_add_comment(self):
        print('\n*** test_add_comment ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        self.con.drop_table(self.tablename)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        command = "add_comment table %s 'Test table'" % self.tablename.upper()
        self.con.onecmd(command)
        command = "add_comment column %s.RA 'Coordinate'" % self.tablename.upper(
        )
        self.con.onecmd(command)
        command = 'describe_table %s;' % self.tablename.upper()
        self.con.onecmd(command)
        self.con.drop_table(self.tablename)
        os.remove(self.csvfile)

    def test_select(self):
        print('\n*** test_select ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        self.con.drop_table(self.tablename)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        command = "select RA,DEC from %s ;" % self.tablename.upper()
        self.con.onecmd(command)
        self.con.drop_table(self.tablename)
        os.remove(self.csvfile)

    def test_select_csv(self):
        print('\n*** test_select_csv ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        self.con.drop_table(self.tablename)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        command = "select RA,DEC from %s ; > %s" % (self.tablename.upper(),
                                                    self.csvfile)
        self.con.onecmd(command)
        self.assertTrue(os.path.exists(self.csvfile))
        os.remove(self.csvfile)
        self.con.drop_table(self.tablename)

    def test_select_fits(self):
        print('\n*** test_select_fits ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        self.con.drop_table(self.tablename)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        os.remove(self.csvfile)
        command = "select RA,DEC from %s ; > %s" % (self.tablename.upper(),
                                                    self.fitsfile)
        self.con.onecmd(command)
        self.assertTrue(os.path.exists(self.fitsfile))
        os.remove(self.fitsfile)
        self.con.drop_table(self.tablename)

    def test_select_hdf5(self):
        print('\n*** test_select_hdf5 ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        self.con.drop_table(self.tablename)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        os.remove(self.csvfile)
        command = "select RA,DEC from %s ; > %s" % (self.tablename.upper(),
                                                    self.h5file)
        self.con.onecmd(command)
        self.assertTrue(os.path.exists(self.h5file))
        os.remove(self.h5file)
        self.con.drop_table(self.tablename)

    def test_select_by_chunks(self):
        print('\n*** test_select_by_chunks ***\n')
        global load_bar
        load_bar = False
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        self.assertTrue(os.path.exists(self.csvfile))
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        for i in range(34):
            command = "append_table %s --tablename %s" % (self.csvfile,
                                                          self.tablename)
            self.con.onecmd(command)
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 35)
        command = "prefetch set 30000"
        self.con.onecmd(command)
        self.con.outfile_max_mb = 1
        command = "select RA,DEC from %s ; > %s" % (self.tablename.upper(),
                                                    self.csvfile)
        self.con.onecmd(command)
        for i in range(6):
            self.assertTrue(
                os.path.exists(
                    os.path.splitext(self.csvfile)[0] + '_00000' + str(i + 1) +
                    '.csv'))
            os.remove(
                os.path.splitext(self.csvfile)[0] + '_00000' + str(i + 1) +
                '.csv')
        self.con.outfile_max_mb = 1000
        self.con.drop_table(self.tablename)
        if os.path.exists(self.csvfile):
            os.remove(self.csvfile)

    def test_load_append_table_csv(self):
        print('\n*** test_load_append_table_csv ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        self.assertTrue(os.path.exists(self.csvfile))
        self.con.drop_table(os.path.splitext(self.csvfile)[0].upper())
        # name from filename
        command = "load_table %s " % self.csvfile
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.csvfile)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)

        # appending
        command = "append_table %s " % self.csvfile
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.csvfile)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(os.path.splitext(self.csvfile)[0].upper())
        os.remove(self.csvfile)

    def test_load_append_table_name_csv(self):
        print('\n*** test_load_append_table_name_csv ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        self.assertTrue(os.path.exists(self.csvfile))
        # name from tablename
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        # appending
        command = "append_table %s --tablename %s" % (self.csvfile,
                                                      self.tablename)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(self.tablename)
        os.remove(self.csvfile)

    def test_load_append_table_chunk_csv(self):
        print('\n*** test_load_append_table_chunk_csv ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        self.assertTrue(os.path.exists(self.csvfile))
        # chunksize
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s --chunksize %s" % (
            self.csvfile, self.tablename, self.chunk)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        # appending
        command = "append_table %s --tablename %s --chunksize %s" % (
            self.csvfile, self.tablename, self.chunk)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)

    def test_load_append_table_memory_csv(self):
        print('\n*** test_load_append_table_memory_csv ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        for i in range(9):
            df.to_csv(self.csvfile,
                      index=False,
                      float_format='%.8f',
                      sep=',',
                      mode='a',
                      header=False)
        self.assertTrue(os.path.exists(self.csvfile))
        # memsize
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s --memsize %s" % (
            self.csvfile, self.tablename, self.memsize)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 10)
        # appending
        command = "append_table %s --tablename %s --memsize %s" % (
            self.csvfile, self.tablename, self.memsize)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 20)
        # end
        os.remove(self.csvfile)
        self.con.drop_table(self.tablename)

    def test_load_append_table_memory_chunk_csv(self):
        print('\n*** test_load_append_table_memory_chunk_csv ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        for i in range(9):
            df.to_csv(self.csvfile,
                      index=False,
                      float_format='%.8f',
                      sep=',',
                      mode='a',
                      header=False)
        self.assertTrue(os.path.exists(self.csvfile))
        # memsize
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s --memsize %s --chunksize %s" % (
            self.csvfile, self.tablename, self.memsize, self.chunk * 10)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 10)
        # appending
        command = "append_table %s --tablename %s --memsize %s --chunksize %s" % (
            self.csvfile, self.tablename, self.memsize, self.chunk * 200)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 20)
        # end
        os.remove(self.csvfile)
        self.con.drop_table(self.tablename)

    def test_load_append_table_fits(self):
        print('\n*** test_load_append_table_fits ***\n')
        data = create_test_data()
        fitsio.write(self.fitsfile, data, clobber=True)
        self.assertTrue(os.path.exists(self.fitsfile))
        self.con.drop_table(os.path.splitext(self.fitsfile)[0].upper())
        # name from filename
        command = "load_table %s " % self.fitsfile
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.fitsfile)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)

        # appending
        command = "append_table %s " % self.fitsfile
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.fitsfile)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(os.path.splitext(self.fitsfile)[0].upper())
        os.remove(self.fitsfile)

    def test_load_append_table_name_fits(self):
        print('\n*** test_load_append_table_name_fits ***\n')
        data = create_test_data()
        fitsio.write(self.fitsfile, data, clobber=True)
        self.assertTrue(os.path.exists(self.fitsfile))
        # name from tablename
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s" % (self.fitsfile,
                                                    self.tablename)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        # appending
        command = "append_table %s --tablename %s" % (self.fitsfile,
                                                      self.tablename)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        os.remove(self.fitsfile)
        self.con.drop_table(self.tablename)

    def test_load_append_table_chunk_fits(self):
        print('\n*** test_load_append_table_chunk_fits ***\n')
        data = create_test_data()
        fitsio.write(self.fitsfile, data, clobber=True)
        self.assertTrue(os.path.exists(self.fitsfile))
        # chunksize
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s --chunksize %s" % (
            self.fitsfile, self.tablename, self.chunk)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        # appending
        command = "append_table %s --tablename %s --chunksize %s" % (
            self.fitsfile, self.tablename, self.chunk)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        os.remove(self.fitsfile)
        self.con.drop_table(self.tablename)

    def test_load_append_table_memory_fits(self):
        print('\n*** test_load_append_table_memory_fits ***\n')
        data = create_test_data()
        for i in range(4):
            data = np.concatenate((data, data))
        fitsio.write(self.fitsfile, data, clobber=True)
        self.assertTrue(os.path.exists(self.fitsfile))
        # memsize
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s --memsize %s" % (
            self.fitsfile, self.tablename, self.memsize)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 16)
        # appending
        command = "append_table %s --tablename %s --memsize %s" % (
            self.fitsfile, self.tablename, self.memsize)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2 * 16)
        # end
        os.remove(self.fitsfile)
        self.con.drop_table(self.tablename)

    def test_load_append_table_memory_chunk_fits(self):
        print('\n*** test_load_append_table_memory_chunk_fits ***\n')
        data = create_test_data()
        for i in range(4):
            data = np.concatenate((data, data))
        fitsio.write(self.fitsfile, data, clobber=True)
        self.assertTrue(os.path.exists(self.fitsfile))
        # memsize
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s --memsize %s --chunksize %s" % (
            self.fitsfile, self.tablename, self.memsize, self.chunk * 10)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 16)
        # appending
        command = "append_table %s --tablename %s --memsize %s --chunksize %s" % (
            self.fitsfile, self.tablename, self.memsize, self.chunk * 200)
        self.con.onecmd(command)
        cursor = self.con2.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2 * 16)
        # end
        os.remove(self.fitsfile)
        self.con.drop_table(self.tablename)

    def test_loadsql(self):
        print('\n*** test_loadsql ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        self.assertTrue(os.path.exists(self.csvfile))
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        os.remove(self.csvfile)
        query = """
        -- This is a comment
        select RA, DEC from %s -- this is another comment
         ; > %s
        """ % (self.tablename, self.csvfile)
        with open(self.sqlfile, 'w') as F:
            F.write(query)

        command = "loadsql %s" % (self.sqlfile)
        self.con.onecmd(command)
        self.assertTrue(os.path.exists(self.csvfile))
        df = pd.read_csv(self.csvfile, sep=',')
        self.assertEqual(len(df), self.nrows)
        os.remove(self.csvfile)
        self.assertFalse(os.path.exists(self.csvfile))
        os.remove(self.sqlfile)

    @unittest.skip("Need to re evaluate")
    def test_inline(self):
        print('\n*** test_inline ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        self.assertTrue(os.path.exists(self.csvfile))
        self.con.drop_table(self.tablename)
        command = "load_table %s --tablename %s" % (self.csvfile,
                                                    self.tablename)
        self.con.onecmd(command)
        command = "import wrapped as Y"
        self.con.onecmd(command)
        command = "select /*p: Y.my_sum(ra,dec) as testcol */, dec from %s ; > %s" % (
            self.tablename, self.csvfile)
        self.con.onecmd(command)
        self.assertTrue(os.path.exists(self.csvfile))
        df = pd.read_csv(self.csvfile, sep=',')
        self.assertEqual(len(df), self.nrows)
        self.assertTrue('TESTCOL' in df.columns.values.tolist())
        os.remove(self.csvfile)
        self.assertFalse(os.path.exists(self.csvfile))
        self.con.drop_table(self.tablename)
Example #16
def rawdata_exp_query(args):

    import easyaccess as ea
    import pandas as pd
    import numpy as np 

    if args.verbose > 0:
        print()
        print('* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *')
        print('rawdata_exp_query')
        print('* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *')
        print()

    # Updated query to get rid of any commas in the field, object, or program name
    #  (which could play havoc downstream)...
    query="""
        select e.expnum, 
               e.radeg as EXPRA, e.decdeg as EXPDEC, e.exptime, e.airmass, 
               e.band, e.nite, e.mjd_obs, 
               REPLACE(e.field,   ',', ' ') as field, 
               REPLACE(e.object,  ',', ' ') as object, 
               REPLACE(e.program, ',', ' ') as program, 
               t.pfw_attempt_id, t.tag, 
               qa.source, qa.t_eff, qa.psf_fwhm, qa.f_eff, 
               qa.skybrightness, qa.b_eff, qa.cloud_apass, qa.cloud_nomad,
               qa.n_apass, qa.n_nomad, qa.c_eff, qa.skytilt, qa.astrom_sigma,
               qa.astrom_offset, qa.astrom_ndets, qa.astrom_chi2, qa.nobjects,
               qa.flag, qa.calnac, qa.cloud_des, qa.n_des
        from prod.exposure e, prod.proctag t, prod.qa_summary qa
        where t.pfw_attempt_id=qa.pfw_attempt_id and
              e.expnum=qa.expnum and 
              t.tag='%s' and
              qa.expnum between %d and %d
        order by qa.expnum
        """ % (args.tag, args.expnumMin, args.expnumMax)
    

    #select e.expnum,
    #       e.radeg as EXPRA, e.decdeg as EXPDEC, e.exptime, e.airmass, 
    #       e.band, e.nite, e.mjd_obs, e.field, e.object, e.program,  
    #       q.reqnum,q.unitname,q.attnum,
    #       qa.t_eff,qa.c_eff,qa.b_eff,qa.skybrightness,qa.fwhm_asec 
    #from exposure e,finalcut_eval qa,proctag t,pfw_attempt q  
    #where t.pfw_attempt_id=qa.pfw_attempt_id and 
    #      e.expnum=qa.expnum and 
    #      q.id=t.pfw_attempt_id and 
    #      tag like 'Y3A2_MISC' 
    #order by e.expnum;

    #query="""
    #    select e.expnum, 
    #           e.radeg as EXPRA, e.decdeg as EXPDEC, e.exptime, e.airmass, 
    #           e.band, e.nite, e.mjd_obs, e.field, e.object, e.program, 
    #           t.pfw_attempt_id, t.tag, 
    #           f.t_eff, f.f_eff, f.b_eff, f.c_eff, 
    #           f.fwhm_asec, f.ellipticity, f.skybrightness, 
    #           f.cloud_apass, f.cloud_nomad, f.cloud_catalog, 
    #           f.n_apass, f.n_nomad,
    #           f.cloud_des, f.n_des
    #    from prod.exposure e, prod.proctag t, prod.finalcut_eval f
    #    where t.pfw_attempt_id=f.pfw_attempt_id and
    #          e.expnum=f.expnum and 
    #          t.tag='%s' and
    #          e.expnum between %d and %d
    #    order by e.expnum
    #    """ % (args.tag, args.expnumMin, args.expnumMax)
    
    if args.verbose > 0: print(query)
    
    outputFile = args.outputFile

    # Open connection to db...
    connection = ea.connect('desoper')

    # Make query and save to output file...
    connection.query_and_save(query, outputFile)

    # Close connection to db...
    connection.close()

    if args.verbose > 0: print()

    return 0
Example #17
def run(args):
	xs = float(args.xsize)
	ys = float(args.ysize)
	colors = args.colors.split(',')
	usernm = ''
	jobid = ''
	outdir = ''
	
	if rank == 0:
		start = time.time()
		if args.db == 'DR1':
			db = 'desdr'
		elif args.db == 'Y3A2':
			db = 'dessci'
		
		# This puts any input type into a pandas dataframe
		if args.csv:
			userdf = pd.DataFrame(pd.read_csv(args.csv))
		elif args.ra:
			coords = {}
			coords['RA'] = args.ra
			coords['DEC'] = args.dec
			userdf = pd.DataFrame.from_dict(coords, orient='columns')
		elif args.coadd:
			coadds = {}
			coadds['COADD_OBJECT_ID'] = args.coadd
			userdf = pd.DataFrame.from_dict(coadds, orient='columns')
		
		df = pd.DataFrame()
		unmatched_coords = {'RA':[], 'DEC':[]}
		unmatched_coadds = []
		
		conn = ea.connect(db)
		curs = conn.cursor()
		
		usernm = str(conn.user)
		jobid = str(uuid.uuid4())
		outdir = OUTDIR + '/' + usernm + '/' + jobid + '/'
		tablename = 'BTL_'+jobid.upper().replace("-","_")	# "BulkThumbs_List_<jobid>"
		
		if 'RA' in userdf:
			print(userdf.head())
			
			if args.db == 'Y3A2':
				ra_adjust = [360-userdf['RA'][i] if userdf['RA'][i]>180 else userdf['RA'][i] for i in range(len(userdf['RA']))]
				userdf = userdf.assign(RA_ADJUSTED = ra_adjust)
				userdf.to_csv(tablename+'.csv', index=False)
				conn.load_table(tablename+'.csv', name=tablename)
				
				query = "select temp.RA, temp.DEC, temp.RA_ADJUSTED, temp.RA as ALPHAWIN_J2000, temp.DEC as DELTAWIN_J2000, m.TILENAME from Y3A2_COADDTILE_GEOM m, {} temp where (m.CROSSRA0='N' and (temp.RA between m.RACMIN and m.RACMAX) and (temp.DEC between m.DECCMIN and m.DECCMAX)) or (m.CROSSRA0='Y' and (temp.RA_ADJUSTED between m.RACMIN-360 and m.RACMAX) and (temp.DEC between m.DECCMIN and m.DECCMAX))".format(tablename)
				
				df = conn.query_to_pandas(query)
				curs.execute('drop table {}'.format(tablename))
				os.remove(tablename+'.csv')
			
			if args.db == 'DR1':
				for i in range(len(userdf)):
					ra = userdf['RA'][i]
					ra180 = ra
					if ra > 180:
						ra180 = 360 - ra
					
					if args.db == 'DR1':
						query = "select * from (select TILENAME from DR1_TILE_INFO where (CROSSRA0='N' and ({0} between RACMIN and RACMAX) and ({1} between DECCMIN and DECCMAX)) or (CROSSRA0='Y' and ({2} between RACMIN-360 and RACMAX) and ({1} between DECCMIN and DECCMAX))) where rownum=1".format(ra, userdf['DEC'][i], ra180)
					
					f = conn.query_to_pandas(query)
					if f.empty:
						unmatched_coords['RA'].append(userdf['RA'][i])
						unmatched_coords['DEC'].append(userdf['DEC'][i])
					else:	
						df = df.append(f)
		
		if 'COADD_OBJECT_ID' in userdf:
			if args.db == 'Y3A2':
				conn.load_table(args.csv, name=tablename)
				
				query = "select temp.COADD_OBJECT_ID, m.ALPHAWIN_J2000, m.DELTAWIN_J2000, m.RA, m.DEC, m.TILENAME from Y3A2_COADD_OBJECT_SUMMARY m, {} temp where temp.COADD_OBJECT_ID=m.COADD_OBJECT_ID".format(tablename)
				
				df = conn.query_to_pandas(query)
				curs.execute('drop table {}'.format(tablename))
			
			if args.db == 'DR1':
				for i in range(len(userdf)):
					query = "select COADD_OBJECT_ID, ALPHAWIN_J2000, DELTAWIN_J2000, RA, DEC, TILENAME from DR1_MAIN where COADD_OBJECT_ID={0}".format(userdf['COADD_OBJECT_ID'][i])
					
					f = conn.query_to_pandas(query)
					if f.empty:
						unmatched_coadds.append(userdf['COADD_OBJECT_ID'][i])
					else:
						df = df.append(f)
		
		conn.close()
		print('finished query')
		print(len(df))
		print(df.head())
		df = df.sort_values(by=['TILENAME'])
		
		# np.array_split yields exactly nprocs pieces, as comm.scatter requires
		# (assumes numpy is imported as np)
		df = np.array_split(df, nprocs)
		
		end = time.time()
		print('Querying took (s): ' + str(end-start))
		print(unmatched_coords)
		print(unmatched_coadds)
	
	else:
		df = None
	
	usernm, jobid, outdir = comm.bcast([usernm, jobid, outdir], root=0)
	#outdir = usernm + '/' + jobid + '/'
	df = comm.scatter(df, root=0)
	
	tilenm = df['TILENAME'].unique()
	for i in tilenm:
		tiledir = TILES_FOLDER + '/' + i + '/'
		udf = df[ df.TILENAME == i ]
		udf = udf.reset_index()
		
		size = u.Quantity((ys, xs), u.arcmin)
		positions = SkyCoord(udf['ALPHAWIN_J2000'], udf['DELTAWIN_J2000'], frame='icrs', unit='deg', equinox='J2000', representation_type='spherical')
		
		if args.make_tiffs or args.make_pngs:
			MakeTiffCut(tiledir, outdir, positions, xs, ys, udf, args.make_tiffs, args.make_pngs)
			"""
			# 20180823 - I can't think of a good reason for this to NOT be included within the function above, so, merge?
			imgname = glob.glob(tiledir + '*.tiff')
			try:
				im = Image.open(imgname[0])
			except IOError as e:		# Might need to change to indexerror - "IndexError: list index out of range"
				print('No TIFF file found for tile ' + str(i) + '. Will not create true-color cutout.')
			else:
				MakeTiffCut(tiledir, outdir, im, positions, xs, ys, udf, args.make_tiffs, args.make_pngs)
			"""
		
		if args.make_fits:
			MakeFitsCut(tiledir, outdir, size, positions, colors, udf)
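The split/scatter handoff in run() above is easy to get wrong when the row count does not divide evenly across ranks; a minimal, self-contained sketch of the same mpi4py pattern (the integer payload is illustrative):

# Minimal sketch of the rank-0 split/scatter pattern used in run() above,
# assuming mpi4py; the integer payload stands in for the dataframe chunks.
from mpi4py import MPI

comm = MPI.COMM_WORLD
nprocs = comm.Get_size()
rank = comm.Get_rank()

if rank == 0:
    work = list(range(17))                              # placeholder payload
    chunks = [work[i::nprocs] for i in range(nprocs)]   # exactly nprocs chunks
else:
    chunks = None

mychunk = comm.scatter(chunks, root=0)                  # one chunk per rank
print(rank, mychunk)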
query8 = 'select * from (select COADD_OBJECT_ID from DR1_MAIN where TILENAME=\'DES0308-1958\') where rownum<{};'.format(r)
r = random.randint(1, m)
total += r
query9 = 'select * from (select COADD_OBJECT_ID from DR1_MAIN where TILENAME=\'DES0311-3749\') where rownum<{};'.format(r)
r = random.randint(1, m)
total += r
query10 = 'select * from (select COADD_OBJECT_ID from DR1_MAIN where TILENAME=\'DES0435-2623\') where rownum<{};'.format(r)
r = random.randint(1, m)
total += r
query11 = 'select * from (select COADD_OBJECT_ID from DR1_MAIN where TILENAME=\'DES2147-5540\') where rownum<{};'.format(r)
r = random.randint(1, m)
total += r
query12 = 'select * from (select COADD_OBJECT_ID from DR1_MAIN where TILENAME=\'DES2246-4914\') where rownum<{};'.format(r)
"""

conn = ea.connect('desdr')
curs = conn.cursor()

df1 = conn.query_to_pandas(query1)
df2 = conn.query_to_pandas(query2)
df3 = conn.query_to_pandas(query3)
df4 = conn.query_to_pandas(query4)
df5 = conn.query_to_pandas(query5)
df6 = conn.query_to_pandas(query6)
df7 = conn.query_to_pandas(query7)
df8 = conn.query_to_pandas(query8)
df9 = conn.query_to_pandas(query9)
df10 = conn.query_to_pandas(query10)
df11 = conn.query_to_pandas(query11)
df12 = conn.query_to_pandas(query12)
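The twelve numbered query variables follow a single template; a sketch of generating them in a loop instead (the tile list and cap m are illustrative, conn as opened above):

# Sketch: build the per-tile rownum-limited queries in a loop rather than
# as numbered variables; `tiles` and `m` are illustrative, conn as above.
import random

tiles = ['DES0308-1958', 'DES0311-3749', 'DES0435-2623']
m = 1000
frames = []
for tile in tiles:
    r = random.randint(1, m)
    q = ("select * from (select COADD_OBJECT_ID from DR1_MAIN "
         "where TILENAME='{0}') where rownum<{1}".format(tile, r))
    frames.append(conn.query_to_pandas(q))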
def rawdata_se_object_firstcut_query(args):

    import easyaccess as ea
    import numpy as np

    if args.verbose > 0:
        print()
        print('* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *')
        print('rawdata_se_object_firstcut_query')
        print('* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *')
        print()

    connection = ea.connect('desoper')

    # Based on query from Robert Gruendl from early Y3A1 tests,
    #  with some more modern (Y4A1 vintage) suggestions from
    #  Eli Rykoff...
    #
    # (Originally, included "b.accepted='True'" in WHERE
    #  statement for z, but realized that this omitted
    #  lots of standard star field stars.  Now, this
    #  WHERE clause is included as part of a
    #  "(z.accepted='True' or e.program='photom-std-field')"
    #  WHERE clause for the exposure query.
    #  Thus, be careful where you want to include standard
    #  star fields or not downstream...)

    query="""
    with x as (
          select /*+materialize */ pfw_attempt_id
          from prod.proctag
          where tag='%s'
          ),
    y as (
          select /*+materialize */ a.expnum as expnum,
                                   max(a.lastchanged_time) as evaltime
          from prod.firstcut_eval a
          where a.analyst!='SNQUALITY'
          group by a.expnum
          ),
    z as (
          select /*+materialize */ b.expnum
          from prod.firstcut_eval b, y
          where b.expnum=y.expnum and
                b.lastchanged_time=y.evaltime and
                b.expnum between %d and %d
          )
    select cast(c.expnum as NUMBER(8)) as EXPNUM,
           c.filename, c.band, 
           cast(c.ccdnum as NUMBER(4)) as CCDNUM, 
           o.object_number, o.x_image, o.y_image, o.ra, o.dec,
           o.flux_psf, o.fluxerr_psf, o.flux_aper_8, o.fluxerr_aper_8,
           o.class_star, o.spread_model, o.spreaderr_model, o.flags
    from prod.se_object o, prod.catalog c, x, z
    where x.pfw_attempt_id=c.pfw_attempt_id and
          c.filetype='cat_firstcut' and
          c.expnum=z.expnum and
          o.filename=c.filename and
         (o.x_image between 100 and 1900) and
         (o.y_image between 100 and 3950) and
          o.flags = 0 and o.imaflags_iso = 0 and
         (o.spread_model between -0.01 and 0.01) and o.class_star > 0.80 and
        ((1.086*o.fluxerr_psf/o.flux_psf) between 0.0001 and 0.1000)
        """ % \
        (args.tag, args.expnumMin, args.expnumMax)

    if args.verbose > 0: print(query)

    outputFiles = args.outputFiles

    connection.query_and_save(query, outputFiles)

    connection.close()

    if args.verbose > 0: print()

    return 0
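The y/z subqueries keep only the most recent firstcut_eval row per exposure; the same selection reproduced on a toy pandas frame, for reference:

# Toy reproduction of the "latest evaluation per expnum" selection that
# the y/z subqueries perform (values are illustrative).
import pandas as pd

evals = pd.DataFrame({
    'expnum':           [1001, 1001, 1002],
    'lastchanged_time': [1,    2,    1],
    'analyst':          ['A',  'B',  'C'],
})
latest = evals.loc[evals.groupby('expnum')['lastchanged_time'].idxmax()]
print(latest)   # one row per expnum: the most recent evaluation of each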
def main():
    desoper = ea.connect(section='desoper')
    dessci = ea.connect(section='dessci')

    #y4_exps = pd.read_csv('/Volumes/lsa-gerdes/wsdiff_catalogs/season240/nofakes/wsdiff_season240_Y4_griz_nofakes.csv')

    #y4_exps.to_pickle('/Users/lynuszullo/pyOrbfit/Y4_Transient_Search/y4_pickle')

    #s240_df = pd.read_pickle('/Users/lynuszullo/pyOrbfit/Y4_Transient_Search/y4_pickle')
    #s240_expnums = s240_df['expnum'].drop_duplicates()

    ra_deg = 315
    dec_deg = -46

    catalogue = 0
    diffimg = 0
    astcount = 0
    diff_asteroids = 0
    cat_asteroids = 0
    catdiffast = 0

    #for exp in s240_expnums:

    ra = ephem.degrees(ra_deg * ephem.pi / 180)
    dec = ephem.degrees(dec_deg * ephem.pi / 180)
    box = 10000.  # arcsec
    season = 240
    expnum = 572126

    exposure_info = desoper.query_to_pandas('SELECT RADEG, DECDEG, DATE_OBS FROM PROD.EXPOSURE WHERE ' +
                                            'EXPNUM = ' + str(expnum))
    ra_exp, dec_exp = np.radians(exposure_info.RADEG[0]), np.radians(exposure_info.DECDEG[0])
    ra_exp_deg, dec_exp_deg = np.degrees(ra_exp), np.degrees(dec_exp)
    date = exposure_info.DATE_OBS[0]  # type: str
    date = date.replace('-', '/')
    date = date.replace('T', ' ')
    date = date.split('.')[0]

    c = SkyCoord(ra=ra_exp * u.rad, dec=dec_exp * u.rad, frame='icrs')

    asteroids = MPChecker(str(c.ra.to_string(unit=u.hour, sep=':')),
                          str(c.dec.to_string(unit=u.degree, sep=':')),
                          date)#, box=box // 60 + 1)

    diff_img_df = get_diffimg_season(season)
    diff_img_df = diff_img_df[diff_img_df['expnum'] == expnum]


    asteroids = asteroids.rename(columns={'mpcRA': 'ra', 'mpcDec': 'dec'})
    asteroids.ra = asteroids.ra.apply(MPCToTNO.ra_convert)
    asteroids.dec = asteroids.dec.apply(MPCToTNO.dec_convert)

    asteroids.ra = asteroids.ra.apply(lambda ra: float(Angle(ra + ' hours').to_string(unit=u.degree, decimal=True)))
    asteroids.dec = asteroids.dec.apply(lambda dec: float(Angle(dec + ' degrees').to_string(unit=u.degree, decimal=True)))

    assert not asteroids.empty
    print "Asteroids Found!"

    se_df = get_SE_detections(desoper, expnum=expnum)
    assert not se_df.empty
    print "Exposure Not Empty!"
    # se_df = get_SE_detections(desoper, ra, dec, box, expnum=expnum)
    coadd_df = get_coadd_cutout(dessci, ra_exp, dec_exp, box)

    catalog_df = get_transient_detections(se_df, coadd_df, 1)
    catalog_df['date'] = catalog_df['date'].apply(lambda date: str(ephem.date(date)))
    # diff_img_df = get_diffimg_cutout(ra, dec, box, season, "../wsdiff_catalogs")
    # diff_img_df = get_diffimg_season(season, "../wsdiff_catalogs")

    pickle(coadd=coadd_df, catalog=catalog_df, diff_img=diff_img_df)
    # _, _, catalog_df, diff_img_df = unpickle(catalog=True, diff_img=True)

    # diff_img_df.to_pickle('diff_img_df.pickle')

    # catalog_df = pd.read_pickle('catalog_df_LARGE.pickle')
    # diff_img_df = pd.read_pickle('diff_img_df_LARGE.pickle')

    # drawT_eff(catalog_df, diff_img_df)

    # diff_img_df['star_like'] = diff_img_df['spread_model'] + 3 * diff_img_df['spreaderr_model']
    #
    # diff_img_df = diff_img_df[(diff_img_df['star_like'] < 0.1) & (diff_img_df['star_like'] > -0.1)]
    # plt.plot(diff_img_df['mag'], diff_img_df['star_like'], linestyle='None', marker='o')
    # plt.xlabel('Magnitude')
    # plt.ylabel("SM + 3 * SEM")
    # plt.title("Starlike Diff_img Detections over Magnitude")
    # plt.tight_layout()
    # plt.savefig("Mag_vs_Starlike_Catalog.png")

    catalog_df['mag'] = catalog_df.apply(lambda row: mag_calib(row), axis=1)

    catalog_df['star_like'] = catalog_df['spread_model'] + 3 * catalog_df['spreaderr_model']
    # catalog_df.to_pickle('catalog_df_LARGE_mag.pickle')
    # catalog_df = pd.read_pickle('catalog_df_LARGE_mag.pickle')
    print('star_like column created')
    # print catalog_df.head(10)
    catalog_df = catalog_df[(catalog_df['star_like'] > -0.05) & (catalog_df['star_like'] < 0.05)]
    print('cuts made')

    # catalog_df = catalog_df[(catalog_df['star_like'] > -0.1) & (catalog_df['star_like'] < 0.1)]
    # plt.plot(catalog_df['mag'], catalog_df['star_like'], linestyle='None', marker='o')
    # plt.xlabel('Magnitude')
    # plt.ylabel("SM + 3 * SEM")
    # plt.title("Starlike Catalog Detections over Magnitude")
    # plt.tight_layout()
    # plt.savefig("Mag_vs_Starlike_Catalog.png")

    # overlap_coadd = overlap(diff_img_df, coadd_df, datematch=False, dropOverlap=True)
    selfoverlap_df = self_overlap(catalog_df)

    # overlap_df = overlap(diff_img_df, catalog_df)
    try:
        asteroid_catalog = overlap(catalog_df, asteroids, dropOverlap=False, datematch=False, threshold=4)
        asteroid_diffimg = overlap(diff_img_df, asteroids, dropOverlap=False, datematch=False, threshold=4)
        astcat_astdiff = df_intersection(asteroid_catalog, asteroid_diffimg, 1 / 3600)
        # = overlap(asteroid_catalog, asteroid_diffimg, dropOverlap=True, datematch=False,
        #                        threshold=1)
    except ValueError:  # raised if `y` is empty.
        # Fall back to empty frames (assumes pandas imported as pd) so the
        # summary prints and plots below still run.
        asteroid_catalog = asteroid_diffimg = astcat_astdiff = pd.DataFrame(columns=['ra', 'dec'])

    print "Catalog Only: ", len(catalog_df)
    print "Diff Img Only: ", len(diff_img_df)
    # print "Both: ", len(overlap_df)
    # print "Coadd Matches: ", len(overlap_coadd)
    print "Self Overlap: ", len(selfoverlap_df)
    print "Asteroids: ", len(asteroids)
    print "Asteroids Found via Cat: ", len(asteroid_catalog)
    print "Asteroids Found via DiffImg: ", len(asteroid_diffimg)

    # expnum_ccd_band_set = set()
    #
    # for index, row in catalog_df.iterrows():
    #     expnum_ccd_band_set.add((row['expnum'], row['ccd'], row['band']))
    #
    # bleed_df = grab_bleedtrails(desoper, expnum_ccd_band_set)
    # print bleed_df.head()
    # print len(bleed_df)

    cat_detect = plt.scatter(catalog_df['ra'], catalog_df['dec'], linestyle='None', color='g', marker='.', s=0.5)
    diff_detect = plt.scatter(diff_img_df['ra'], diff_img_df['dec'], linestyle='None',color='c', marker='.', s=0.5) #see if any fall in coadd already
    asteroid_plt = plt.scatter(asteroids['ra'], asteroids['dec'], linestyle='None', color='r', marker='o', alpha=0.4, label='asteroids')
    cat_asteroid = plt.scatter(asteroid_catalog['ra'], asteroid_catalog['dec'], linestyle='None', color='blue', marker='o', label = 'catalogue')
    diff_asteroid = plt.scatter(asteroid_diffimg['ra'], asteroid_diffimg['dec'], linestyle='None', color='black', marker='o', label = 'diffimg')
    cat_diff_asteroid = plt.scatter(astcat_astdiff['ra'], astcat_astdiff['dec'], linestyle='None', color='purple', marker='o', label = 'catdiff')

    plt.legend((cat_detect, diff_detect, asteroid_plt, cat_asteroid, diff_asteroid, cat_diff_asteroid),
    ('Catalogue', 'DiffImg', 'Asteroids', 'Catalogue Detections', 'Diffimg Detections', 'Catalogue and Diffimg'),
        scatterpoints=1,
        loc='lower left',
        ncol=3,
        fontsize=8)

    title = "Exposure " + str(expnum)
    plt.title(title)
    plt.savefig('/Users/lynuszullo/pyOrbfit/Y4_Transient_Search/' + title)
    print "Saved as " + title
    # plt.savefig('detectionsLarge_0.05_cut_Overlap_Cat_Removed.png')
    #
    # plt.figure()
    # plt.hist(catalog_df['star_like'], 50)
    # plt.savefig('hist_starlike_Large_0.05_cut_Overlap_Cat_Removed.png')
    #
    # plt.figure()
    # plt.hist(catalog_df['mag'], 50, range=(20, 30))
    # plt.savefig('hist_mag_Large_0.05_cut_Overlap_Cat_Removed.png')
    #
    # plt.savefig('detectionsLargeCoadd_DI_Removed_Overlap_Cat_Removed_SpreadCut.png')
    # plt.savefig("exposure" + str(expnum) + "_zoom.png")
    expnumCCD_list = []
    catalog_list = []
    diff_img_list = []
    overlap_list = []
    coadd_list = []
    asteroid_list = []

    for index, row in catalog_df.iterrows():
        expnumCCD_list.append((row['expnum'], row['ccd']))
        catalog_list.append((row['ra'], row['dec'], row['expnum'], row['ccd']))

    for index, row in diff_img_df.iterrows():
        expnumCCD_list.append((row['expnum'], row['ccd']))
        diff_img_list.append((row['ra'], row['dec'], row['expnum'], row['ccd']))

    for index, row in asteroid_catalog.iterrows():
        expnumCCD_list.append((row['expnum'], row['ccd']))
        overlap_list.append((row['ra'], row['dec'], row['expnum'], row['ccd']))

    # for index, row in coadd_df.iterrows():
    #     coadd_list.append((row['ra'], row['dec']))

    for index, row in asteroids.iterrows():
        asteroid_list.append((row['ra'], row['dec']))
    os.chdir('/Users/lynuszullo/pyOrbfit/Y4_Transient_Search')
    Find_imgs.findImgs(expnumCCD_list, catalog_list, diff_img_list, overlap_list, ra_deg,
                       dec_deg, box * 2, coadd_list=coadd_list, asteroid_list=asteroid_list,
                       keep_fits=False, only_asteroids=True)
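The sexagesimal strings handed to MPChecker come straight from astropy; a standalone sketch of that conversion (the position is illustrative):

# Standalone sketch of the coordinate formatting used for the MPChecker
# call above; the position is illustrative.
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord

c = SkyCoord(ra=np.radians(315.0) * u.rad, dec=np.radians(-46.0) * u.rad, frame='icrs')
print(c.ra.to_string(unit=u.hour, sep=':'))     # '21:00:00'
print(c.dec.to_string(unit=u.degree, sep=':'))  # '-46:00:00'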
Example #21
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('config')
    parser.add_argument('-t','--table',action='append')
    parser.add_argument('-f','--force',action='store_true')
    parser.add_argument('-v','--verbose',action='store_true')
    parser.add_argument('-d','--dryrun',action='store_true')
    args = parser.parse_args()

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    config = yaml.safe_load(open(args.config))
    section = config['db']
    data = yaml.safe_load(open(config['dbtables']))

    con = ea.connect(section=section,quiet=True)
    con.drop_index = drop_index
    cursor = con.cursor()

    tables = data.keys()
    for table in tables:
        if args.table and (table not in args.table): continue

        logging.debug(table)
        idxname = data[table].get('idxname',table.split('_')[0])

        params = dict(idxname=idxname,table=table)

        for key,val in data[table]['columns'].items():
            if not val.get('index',None): continue
            for b in BANDS:
Example #22
    def __init__(self, trigger_id, jsonfilelist, triggerdir, datadir, real,
                 trigger_path):

        #os.system('kinit -k -t /var/keytab/desgw.keytab desgw/des/[email protected]')
        tstart = time.time()

        if real:
            self.backend = FileBackend("./realdb")
        else:
            self.backend = FileBackend("./testdb")

        try:
            thisevent = self.backend.get(Trigger, {'id': trigger_id})
            print('Found this event in desgw database...')
        except Trigger.DoesNotExist:
            thisevent = Trigger({
                'id': trigger_id,
                'jsonfilelist': jsonfilelist,
                'triggerpath': triggerdir,
                'mapspath': datadir,
                'jobids': [(0, 'jsonfile_corresponding_to_jobid.json')],
            })
            print('Database entry created!')

        self.trigger_id = trigger_id
        self.trigger_path = trigger_path

        self.backend.save(thisevent)
        self.backend.commit()

        with open(os.path.join(triggerdir, "strategy.yaml"), "r") as f:
            self.config = yaml.safe_load(f)
        self.filterobslist = np.array(self.config['exposure_filter'],
                                      dtype='str')
        self.strategydict = {}

        for f in np.unique(self.filterobslist):
            self.strategydict[f] = len(
                self.filterobslist[self.filterobslist == f])

        self.connection = ea.connect(DATABASE)
        self.cursor = self.connection.cursor()

        self.jsonfilelist = jsonfilelist

        print(self.jsonfilelist)
        if hardjson:
            self.jsonfilelist = hj

        self.trigger_id = trigger_id
        self.datadir = datadir
        self.triggerdir = triggerdir
        self.processingdir = os.path.join(self.triggerdir, 'PROCESSING')
        if not os.path.exists(self.processingdir):
            os.makedirs(self.processingdir)

        dire = './processing/' + trigger_id + '/'
        if not os.path.exists(dire):
            os.makedirs(dire)

        with open(os.path.join(triggerdir, "strategy.yaml"), "r") as f:
            self.strategy = yaml.safe_load(f)

        with open("jobmanager.yaml", "r") as g:
            self.jmconfig = yaml.safe_load(g)


        q1 = "select expnum,nite,mjd_obs,telra,teldec,band,exptime,propid,obstype,object from exposure where " \
             "nite>20130828 and nite<20150101 and expnum<300000 and obstype='object' order by expnum"  # y1 images
        self.connection.query_and_save(q1, './processing/exposuresY1.tab')

        q2 = "select expnum,nite,mjd_obs,radeg,decdeg,band,exptime,propid,obstype,object from prod.exposure where " \
             "nite>20150901 and obstype='object' order by expnum"  # y2 and later
        self.connection.query_and_save(q2, './processing/exposuresCurrent.tab')

        os.system(
            'cat ./processing/exposuresY1.tab ./processing/exposuresCurrent.tab > ./processing/exposures.list'
        )

        # Prep all DES images that already exist.
        self.submit_all_jsons_for_sejobs()
        tfin = time.time()
        print('TOTAL SE JOBS TIME', tfin - tstart)
        #sys.exit()
        # A loop that waits for images off the mountain and submits them for processing.
        self.monitor_images_from_mountain()
Example #23
def rawdata_exp_firstcut_query(args):

    import easyaccess as ea
    import pandas as pd
    import numpy as np 

    if args.verbose > 0:
        print()
        print('* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *')
        print('rawdata_exp_firstcut_query')
        print('* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *')
        print()

    query="""
    with x as (
          select /*+materialize */ pfw_attempt_id, tag
          from prod.proctag
          where tag='%s'
          ),
    y as (
          select /*+materialize */ a.expnum as expnum,
                                   max(a.lastchanged_time) as evaltime
          from prod.firstcut_eval a
          where a.analyst!='SNQUALITY'
          group by a.expnum
          ), 
    z as (
          select /*+materialize */ b.*
          from prod.firstcut_eval b, y
          where b.expnum=y.expnum and 
                b.lastchanged_time=y.evaltime and 
                b.expnum between %d and %d
          )
    select e.expnum, 
           e.radeg as EXPRA, e.decdeg as EXPDEC, e.exptime, e.airmass, 
           e.band, e.nite, e.mjd_obs, e.field, e.object, e.program, 
           x.pfw_attempt_id, x.tag,
           z.analyst as SOURCE, z.t_eff, z.fwhm_asec as PSF_FWHM, z.f_eff,
           z.skybrightness, z.b_eff, z.cloud_apass, z.cloud_nomad,
           z.n_apass, z.n_nomad, z.c_eff, z.cloud_des, z.n_des, z.accepted
    from prod.exposure e, x, z
    where e.expnum=z.expnum and
          x.pfw_attempt_id=z.pfw_attempt_id and
         (z.accepted='True' or e.program='photom-std-field') 
    order by e.expnum
    """ % \
        (args.tag, args.expnumMin, args.expnumMax)

    
    if args.verbose > 0: print(query)
    
    outputFile=args.outputFile
    
    # Open connection to db...
    connection=ea.connect('desoper') 
    
    # Make query and save to output file...
    connection.query_and_save(query,outputFile)

    # Close connection to db...
    connection.close()

    if args.verbose > 0: print()

    return 0
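A sketch of a driver for this function, using the attribute names the body reads (tag, expnumMin, expnumMax, outputFile, verbose); the default values are placeholders:

# Illustrative driver for rawdata_exp_firstcut_query; the option names
# match the attributes the function reads, the defaults are placeholders.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--tag', default='Y4A1_FIRSTCUT')
parser.add_argument('--expnumMin', type=int, default=600000)
parser.add_argument('--expnumMax', type=int, default=600100)
parser.add_argument('--outputFile', default='exp_firstcut.csv')
parser.add_argument('--verbose', type=int, default=1)
args = parser.parse_args()

rawdata_exp_firstcut_query(args)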
Example #24
    def test_connect_desoper(self):
        con = ea.connect('desoper', quiet=True)
        self.assertTrue(con.ping())
Example #25
    if update:
        os.environ.update(env)

    return env


############# Image Processing ################

############ create new season number ###########
### Y6 will start with 600 (use 417 for mock runs) #####
import easyaccess
import fitsio

query = 'SELECT max(SEASON) from marcelle.SNFAKEIMG where SEASON < 800;'
connection = easyaccess.connect('destest')
connection.query_and_save(query, 'testfile.fits')
data = fitsio.read('testfile.fits')
print(data[0][0])

newseason = (int(data[0][0] / 100) + 1) * 100
newseason = 519  # hardcoded override of the computed season number
print("the season number for this event is " + str(newseason))
print('')

#Update season number in dagmaker.rc
os.system("sed -i -e '/^SEASON/s/=.*$/=" + str(newseason) + "/' " +
          DIR_SOURCE + "/gw_workflow/dagmaker.rc")

#Make curatedExposure.list
os.system("bash " + DIR_SOURCE + "/make_curatedlist.sh")
Example #27
import numpy as np
import easyaccess as ea
import shapely.geometry as geo

con_op = ea.connect(section='desoper')

q2 = 'SELECT f.RA_CENT,f.DEC_CENT,f.RACMAX,f.RACMIN,f.DECCMAX,f.DECCMIN,f.RAC1,f.RAC2,f.RAC3,f.RAC4,f.DECC1,f.DECC2,f.DECC3,f.DECC4,f.ID,d.TILENAME FROM RUMBAUGH.Y1A1_TILENAMES_LIST d,FELIPE.COADDTILE_GEOM_NEW f WHERE d.tilename=f.tilename'

TDF = con_op.query_to_pandas(q2)

Tpolys = np.zeros(len(TDF), dtype='object')
altpolys = np.zeros(0, dtype='object')
altpolynames = np.zeros(0, dtype='|S30')
altpolynums = np.zeros(0, dtype='i8')
altpolyracens, altpolydeccens = np.zeros(0, dtype='f8'), np.zeros(0,
                                                                  dtype='f8')
for i in range(0, len(TDF)):
    Tpolys[i] = geo.Polygon([(TDF['RAC1'][i], TDF['DECC1'][i]),
                             (TDF['RAC2'][i], TDF['DECC2'][i]),
                             (TDF['RAC3'][i], TDF['DECC3'][i]),
                             (TDF['RAC4'][i], TDF['DECC4'][i])])
    if ((TDF['RACMAX'][i] < TDF['RACMIN'][i]) | (TDF['RACMIN'][i] < 0) |
        (TDF['RACMAX'][i] > 360)):
        ractmps = np.array(
            [TDF['RAC1'][i], TDF['RAC2'][i], TDF['RAC3'][i], TDF['RAC4'][i]])
        ractmps[ractmps > 300] -= 360
        Tpolys[i] = geo.Polygon([(ractmps[0], TDF['DECC1'][i]),
                                 (ractmps[1], TDF['DECC2'][i]),
                                 (ractmps[2], TDF['DECC3'][i]),
                                 (ractmps[3], TDF['DECC4'][i])])
        ractmps = np.array(
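The snippet breaks off mid-statement, but the wrap-around handling it applies is self-contained: RA corners above 300 deg are shifted by -360 so tiles crossing RA = 0 form contiguous polygons. A standalone sketch:

# Standalone sketch of the RA wrap-around handling above: corner RAs
# beyond 300 deg are shifted by -360 so the polygon is contiguous.
import numpy as np
import shapely.geometry as geo

ras = np.array([359.5, 0.5, 0.5, 359.5])    # illustrative tile corners
decs = np.array([-1.0, -1.0, 1.0, 1.0])
ras = np.where(ras > 300, ras - 360, ras)
poly = geo.Polygon(list(zip(ras, decs)))
print(poly.contains(geo.Point(-0.1, 0.0)))  # True: RA 359.9 deg is inside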
Example #28
    parser = argparse.ArgumentParser(description=description)
    args = parser.parse_args()

    data = create_test_data()
    nrows = len(data)

    # Create the data files
    fitsfile = create_test_fits(data=data)
    csvfile = create_test_csv(data=data)
    tabfile = create_test_tab(data=data)

    filenames = [fitsfile, csvfile, tabfile]

    # Try loading through the python interface
    # NOTE: This requires a desservice.ini file
    conn = ea.connect()

    query = 'select * from %s' % BASENAME

    # Complains when the table doesn't exist, we could add: 
    # if conn.check_table_exists(BASENAME): conn.drop_table(BASENAME)
    
    for filename in filenames:
        # First try loading through python interface
        print("*** TESTING PYTHON INTERFACE ***")
        conn.drop_table(BASENAME)
        conn.load_table(filename)
        df = conn.query_to_pandas(query)
        assert len(df) == nrows

        # Then try loading with explicit tablename
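The snippet breaks off at the explicit-tablename case; a sketch of that step under the same setup (the table name TESTTABLE2 is illustrative):

# Sketch: loading with an explicit table name, as the comment above
# points to; TESTTABLE2 is illustrative, conn/filenames/nrows as above.
for filename in filenames:
    print("*** TESTING EXPLICIT TABLENAME ***")
    conn.drop_table('TESTTABLE2')
    conn.load_table(filename, name='TESTTABLE2')
    df = conn.query_to_pandas('select * from TESTTABLE2')
    assert len(df) == nrows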
Example #29
def main():
    cmd = " ".join(sys.argv)
    options, args = parser.parse_args(sys.argv[1:])
    conn = ea.connect(section='destest')
    cursor = conn.cursor()  # create a cursor object to handle db

    fname = options.outfile
    if fname is None:
        print "must specify output file."
        exit()
    out = open(fname, 'w+')
    out.write(master_head_template.format(cmd))

    if options.zpmin:
        # Require minimum zeropoint for highly extincted measurements
        q_zp = " and oi.chip_zero_point > {0:f}".format(options.zpmin)
    else:
        q_zp = " "

    expnumfile = open(options.exptable, 'r')
    expnumstr = '('
    while True:
        try:
            expnumstr = expnumstr + expnumfile.readline().split()[0]
        except IndexError:  # readline() returns '' at EOF, so split()[0] fails
            break
        expnumstr = expnumstr + ','
    expnumstr = expnumstr[:-1] + ')'  # [:-1] to throw out unneeded ','

    query = """\
    SELECT DISTINCT
        oi.ccdnum,
        oi.nite,
        oi.band,
        oi.expnum,
        (oi.chip_ccdgaina + oi.chip_ccdgainb)/2. as gain,
        oi.chip_sigsky_search,
        oi.chip_sigsky_template,
        oi.psf_nea,
        oi.chip_zero_point,
        oi.chip_zero_point_rms,
        fi.field,
        f.mjd_obs,
        fi.rac1,
        fi.decc1,
        fi.rac2,
        fi.decc2,
        fi.rac3,
        fi.decc3,
        fi.rac4,
        fi.decc4
    FROM
        marcelle.snobsinfo oi, marcelle.snforce_image fi, marcelle.snforce f
    WHERE
        oi.image_name_diff = fi.image_name_diff and 
        fi.image_name_diff = f.image_name_diff and
        oi.tile = 0 {0} and
        oi.expnum in {1}
    ORDER BY
        field, oi.ccdnum, oi.nite, oi.band
    """.format(q_zp, expnumstr)
    print(expnumstr)
    print(query)
    # Run the query or load pre-saved results.
    if os.path.exists(cache_filename):
        print("Loading from {0}...".format(cache_filename))
        data = np.load(cache_filename)
    else:
        print("Querying database (this may take a minute or two)...")
        t0 = time.time()
        cursor.execute(query)  # execute query
        header = [item[0] for item in cursor.description]
        print(header)
        rows = cursor.fetchall()
        # figure out dtypes for our array so we can reference names:
        dtype = []
        for head in header:
            if (head in ['CCDNUM', 'NITE', 'EXPNUM']):
                dtype.append((head.lower(), int))
            elif (head in ['BAND', 'FIELD']):
                dtype.append((head.lower(), "S10"))
            else:
                dtype.append((head.lower(), float))
        data = np.array(rows, dtype=dtype)
        print(data['field'])
        print(data['band'])
        print("Query took {0:.2f} seconds, {1:.2f} MB array.".format(
            time.time() - t0, data.nbytes / 1.e6))
        if options.cache:
            print "Saving to {0}...".format(cache_filename)
            np.save(cache_filename, data)
    msg = "{0:d} records".format(len(data))
    print(msg)
    out.write('#{0}\n'.format(msg))

    #-------------------------------------------------------------------------
    # BEGIN HACKS

    # Hack to remove dirty data from the table:
    # This is necessary because there were some -9's written to the database.
    mask = data['psf_nea'] > 0.
    data = data[mask]
    msg = (
        "HACK: {0:d} records after trimming entries with PSF_NEA < 0".format(
            len(data)))
    print(msg)
    out.write('#{0}\n'.format(msg))

    # Hack to cut crazy zeropoint RMS values, including nan
    mask = data['chip_zero_point_rms'] <= 0.5
    data = data[mask]
    msg = ("HACK: {0:d} records after trimming entries with "
           "CHIP_ZERO_POINT_RMS > 0.5".format(len(data)))
    print(msg)
    out.write('#{0}\n'.format(msg))

    # END HACKS
    # ------------------------------------------------------------------------

    # If input file is defined, read positions.
    if options.infile is not None:
        msg = "Reading positions from {0}".format(options.infile)
        print(msg)
        positions = []
        with open(options.infile, 'r') as f:
            keys = f.readline().upper().strip().split(',')
            if ('RA' not in keys) or ('DEC' not in keys):
                print "RA and DEC must appear in first line of infile"
                exit(1)
            for line in f:
                positions.append(odict(zip(keys, line.strip().split(','))))

        # Convert RA, DEC to floats
        for pos in positions:
            pos['RA'] = float(pos['RA'])
            pos['DEC'] = float(pos['DEC'])

        ntarget = len(positions)
        msg += '... {0:d} positions'.format(ntarget)
        out.write('#{0}\n'.format(msg))

    # Otherwise, report area from which positions will be selected.
    else:
        msg = "Randomly selecting position from fields:"
        print(msg)
        out.write('#{0}\n'.format(msg))
        for name in areas.keys():
            msg = ("    {0}: RA=[{1:5.1f},{2:5.1f}] Dec=[{3:5.1f},{4:5.1f}] "
                   "{5:7.4f} deg^2, weight={6:6.4f}".format(
                       name, bounds[name][0], bounds[name][1], bounds[name][2],
                       bounds[name][3], areas[name], weights[name]))
            print(msg)
            out.write('#{0}\n'.format(msg))
        ntarget = options.n

    out.write('BEGIN LIBGEN\n')
    ntot = 0
    ngood = 0
    noverlap = 0  # keep track of number in overlapping fields
    while ngood < ntarget:
        ntot += 1

        # Get a position
        if options.infile:
            if len(positions) == 0: break
            extrakeys = positions.pop(0)
            ra = extrakeys.pop('RA')
            dec = extrakeys.pop('DEC')
        else:
            ra, dec = get_random_position()
            extrakeys = {}

        mask = point_within_quad(ra, dec, data['rac1'], data['decc1'],
                                 data['rac2'], data['decc2'], data['rac3'],
                                 data['decc3'], data['rac4'], data['decc4'])
        posdata = data[mask]

        # Only write this out if there are 2 or more images (1 image is not
        # enough to make a subtraction).
        if len(posdata) > 1:
            ngood += 1

            # Write header and entries for this position to SIMLIB file
            out.write(
                head_template.format(ngood, ra, dec, len(posdata), 0., 0.27))

            # Write extra keys
            for key, val in extrakeys.items():
                out.write('{}: {}\n'.format(key, val))

            # Get unique fields
            fields = np.unique(posdata['field'])
            if len(fields) > 1:
                noverlap += 1

            for i, field in enumerate(fields):
                fielddata = posdata[posdata['field'] == field]
                ccds = np.unique(fielddata['ccdnum'])

                # For this field, find the first (arbitrary) image in each band
                # and get its CHIP_ZERO_POINT and CHIP_SKYSIG_TEMPLATE
                # to be recorded as TEMPLATE_ZPT and TEMPLATE_SKYSIG.
                #
                # This is made harder by the fact that some fields don't have
                # images in every band. So there's not always a "first" band.
                #
                # NOTE:
                # This is non-robust acrobatics!
                # This info should just be two more columns in the table.
                # Can YOU decipher what this is doing? I sure can't.
                idx = []
                for band in ['g', 'r', 'i', 'z']:
                    try:
                        idx.append(
                            np.flatnonzero(fielddata['band'] == band)[0])
                    except IndexError:  # no detection in this band
                        idx.append(-1)  # -1 indicates no image in given band
                zpstr = [
                    "%6.3f" %
                    fielddata['chip_zero_point'][k] if k != -1 else "0.0"
                    for k in idx
                ]
                zpstr = " ".join(zpstr)
                skystr = [
                    "%7.2f" %
                    fielddata['chip_sigsky_template'][k] if k != -1 else "0.0"
                    for k in idx
                ]
                skystr = " ".join(skystr)

                out.write(
                    field_template.format(field, str(ccds), zpstr, skystr))
                if i == 0:
                    out.write(head_comment)

                for j in range(len(fielddata)):
                    out.write(
                        line_template.format(
                            'S', float(fielddata['mjd_obs'][j]),
                            int(fielddata['expnum'][j]),
                            str(fielddata['band'][j]),
                            float(fielddata['gain'][j]), 0.,
                            float(fielddata['chip_sigsky_search'][j]),
                            float(fielddata['psf_nea'][j]), 0., 0.,
                            float(fielddata['chip_zero_point'][j]),
                            float(fielddata['chip_zero_point_rms'][j]), 99.))

            out.write('END_LIBID: {0:d}\n'.format(ngood))

        print('\r{0:<5d} on images / {1:<5d} placed'.format(ngood, ntot), end='')
        sys.stdout.flush()

    out.write("END_OF_SIMLIB:\n")

    print "\n{0:<5d} on overlapping fields".format(noverlap)

    # Effective area. Only calculated if positions were selected randomly.
    if options.infile is None:
        effective_area = totarea * float(ngood) / ntot
        print("Effective area: {0:.4f} deg^2".format(effective_area))
        out.write('EFFECTIVE_AREA: {0:.4f}\n'.format(effective_area))

    out.close()
    print "Wrote to:", fname
Example #30
class TestApi(unittest.TestCase):

    con = ea.connect(quiet=True)
    tablename = 'testtable'
    nrows = 10000
    prefetch = 4000
    chunk = 1000
    memsize = 1
    sqlfile = 'temp.sql'
    csvfile = 'temp.csv'
    fitsfile = 'temp.fits'
    h5file = 'temp.h5'

    def test_ea_import(self):
        print('\n*** test_ea_import ***\n')
        test1 = self.con.ea_import('wrapped')
        if test1 is not None:
            self.assertTrue(test1)
        test2 = self.con.ea_import('wrapped', help=True)
        if test2 is not None:
            self.assertTrue(test2)

    def test_pandas_to_db(self):
        print('\n*** test_pandas_to_db ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        try:
            self.con.drop_table(self.tablename)
        except:
            pass
        self.assertTrue(self.con.pandas_to_db(df, tablename=self.tablename))
        cursor = self.con.cursor()
        self.assertTrue(self.con.ping())
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        # appending
        self.assertTrue(
            self.con.pandas_to_db(df, tablename=self.tablename, append=True))
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(self.tablename)
        self.assertTrue(
            self.con.pandas_to_db(df, tablename=self.tablename, append=True))
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        self.con.drop_table(self.tablename)
        cursor.close()

    def test_query_to_pandas(self):
        print('\n*** test_query_to_pandas ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        try:
            self.con.drop_table(self.tablename)
        except:
            pass
        self.assertTrue(self.con.pandas_to_db(df, tablename=self.tablename))
        query = 'select RA,DEC from {:}'.format(self.tablename.upper())
        df2 = self.con.query_to_pandas(query)
        self.assertEqual(len(df), len(df2))
        self.assertEqual(sorted(df.columns.values.tolist()),
                         sorted(df2.columns.values.tolist()))
        #iterator
        df3 = self.con.query_to_pandas(query, prefetch=4000, iterator=True)
        self.assertEqual(len(next(df3)), 4000)
        self.assertEqual(sorted(next(df3).columns.values.tolist()),
                         sorted(df.columns.values.tolist()))
        self.assertEqual(len(next(df3)), 2000)
        self.con.drop_table(self.tablename)

    def test_describe_table(self):
        print('\n*** test_describe_table ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        try:
            self.con.drop_table(self.tablename)
        except:
            pass
        self.assertTrue(self.con.pandas_to_db(df, tablename=self.tablename))
        self.assertEqual(len(self.con.describe_table(self.tablename)), 2)
        self.con.drop_table(self.tablename)

    def test_loadsql(self):
        print('\n*** test_loadsql ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        try:
            self.con.drop_table(self.tablename)
        except:
            pass
        self.assertTrue(self.con.pandas_to_db(df, tablename=self.tablename))
        query = """
        -- This is a comment
        select RA, DEC from %s -- this is another comment
        """ % self.tablename
        with open(self.sqlfile, 'w') as F:
            F.write(query)
        df2 = self.con.query_to_pandas(self.con.loadsql(self.sqlfile))
        self.assertEqual(len(df), len(df2))
        self.assertEqual(sorted(df.columns.values.tolist()),
                         sorted(df2.columns.values.tolist()))
        query = """
        -- This is a comment
        select RA, DEC from %s ; -- this is another comment
        """ % self.tablename
        with open(self.sqlfile, 'w') as F:
            F.write(query)
        df2 = self.con.query_to_pandas(self.con.loadsql(self.sqlfile))
        self.assertEqual(len(df), len(df2))
        self.assertEqual(sorted(df.columns.values.tolist()),
                         sorted(df2.columns.values.tolist()))
        self.con.drop_table(self.tablename)
        os.remove(self.sqlfile)

    def test_mytables(self):
        print('\n*** test_mytables ***\n')
        df = self.con.mytables()
        self.assertTrue('FGOTTENMETADATA' in df['TABLE_NAME'].values.tolist())

    def test_load_table_csv(self):
        print('\n*** test_load_table_csv ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        self.assertTrue(os.path.exists(self.csvfile))
        self.con.drop_table(os.path.splitext(self.csvfile)[0].upper())
        # name from filename
        self.assertTrue(self.con.load_table(self.csvfile))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.csvfile)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        ## appending
        self.assertTrue(self.con.append_table(self.csvfile))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.csvfile)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(os.path.splitext(self.csvfile)[0].upper())
        # name from tablename
        self.con.drop_table(self.tablename)
        self.assertTrue(self.con.load_table(self.csvfile, name=self.tablename))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        ## appending
        self.assertTrue(
            self.con.append_table(self.csvfile, name=self.tablename))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        # chunksize
        self.con.drop_table(self.tablename)
        self.assertTrue(
            self.con.load_table(self.csvfile,
                                name=self.tablename,
                                chunksize=self.chunk))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        ## appending
        self.assertTrue(
            self.con.append_table(self.csvfile,
                                  name=self.tablename,
                                  chunksize=self.chunk))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(self.tablename)
        os.remove(self.csvfile)

    def test_load_append_table_memory_csv(self):
        print('\n*** test_load_append_table_memory_csv ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        for i in range(9):
            df.to_csv(self.csvfile,
                      index=False,
                      float_format='%.8f',
                      sep=',',
                      mode='a',
                      header=False)
        self.assertTrue(os.path.exists(self.csvfile))
        # memsize
        self.con.drop_table(self.tablename)
        self.assertTrue(
            self.con.load_table(self.csvfile,
                                name=self.tablename,
                                memsize=self.memsize))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 10)
        # appending
        self.assertTrue(
            self.con.append_table(self.csvfile,
                                  name=self.tablename,
                                  memsize=self.memsize))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 20)
        ## end
        os.remove(self.csvfile)
        self.con.drop_table(self.tablename)

    def test_load_append_table_memory_chunk_csv(self):
        print('\n*** test_load_append_table_memory_chunk_csv ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_csv(self.csvfile, index=False, float_format='%.8f', sep=',')
        for i in range(9):
            df.to_csv(self.csvfile,
                      index=False,
                      float_format='%.8f',
                      sep=',',
                      mode='a',
                      header=False)
        self.assertTrue(os.path.exists(self.csvfile))
        # memsize
        self.con.drop_table(self.tablename)
        self.assertTrue(
            self.con.load_table(self.csvfile,
                                name=self.tablename,
                                memsize=self.memsize,
                                chunksize=self.chunk * 10))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 10)
        # appending
        self.assertTrue(
            self.con.append_table(self.csvfile,
                                  name=self.tablename,
                                  memsize=self.memsize,
                                  chunksize=self.chunk * 200))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 20)
        ## end
        os.remove(self.csvfile)
        self.con.drop_table(self.tablename)

    def test_load_table_fits(self):
        print('\n*** test_load_table_fits ***\n')
        data = create_test_data()
        fitsio.write(self.fitsfile, data, clobber=True)
        self.assertTrue(os.path.exists(self.fitsfile))
        self.con.drop_table(os.path.splitext(self.fitsfile)[0].upper())
        # name from filename
        self.assertTrue(self.con.load_table(self.fitsfile))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.fitsfile)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        ## appending
        self.assertTrue(self.con.append_table(self.fitsfile))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.fitsfile)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(os.path.splitext(self.fitsfile)[0].upper())
        # name from tablename
        self.con.drop_table(self.tablename)
        self.assertTrue(self.con.load_table(self.fitsfile,
                                            name=self.tablename))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        ## appending
        self.assertTrue(
            self.con.append_table(self.fitsfile, name=self.tablename))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        # chunksize
        self.con.drop_table(self.tablename)
        self.assertTrue(
            self.con.load_table(self.fitsfile,
                                name=self.tablename,
                                chunksize=self.chunk))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        ## appending
        self.assertTrue(
            self.con.append_table(self.fitsfile,
                                  name=self.tablename,
                                  chunksize=self.chunk))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(self.tablename)
        os.remove(self.fitsfile)

    def test_load_append_table_memory_fits(self):
        print('\n*** test_load_append_table_memory_fits ***\n')
        data = create_test_data()
        for i in range(4):
            data = np.concatenate((data, data))
        fitsio.write(self.fitsfile, data, clobber=True)
        self.assertTrue(os.path.exists(self.fitsfile))
        # memsize
        self.con.drop_table(self.tablename)
        self.assertTrue(
            self.con.load_table(self.fitsfile,
                                name=self.tablename,
                                memsize=self.memsize))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 16)
        # appending
        self.assertTrue(
            self.con.append_table(self.fitsfile,
                                  name=self.tablename,
                                  memsize=self.memsize))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2 * 16)
        ## end
        os.remove(self.fitsfile)
        self.con.drop_table(self.tablename)

    def test_load_append_table_memory_chunk_fits(self):
        print('\n*** test_load_append_table_memory_chunk_fits ***\n')
        data = create_test_data()
        for i in range(4):
            data = np.concatenate((data, data))
        fitsio.write(self.fitsfile, data, clobber=True)
        self.assertTrue(os.path.exists(self.fitsfile))
        # memsize
        self.con.drop_table(self.tablename)
        self.assertTrue(
            self.con.load_table(self.fitsfile,
                                name=self.tablename,
                                memsize=self.memsize,
                                chunksize=self.chunk * 10))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 16)
        # appending
        self.assertTrue(
            self.con.append_table(self.fitsfile,
                                  name=self.tablename,
                                  memsize=self.memsize,
                                  chunksize=self.chunk * 200))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2 * 16)
        ## end
        os.remove(self.fitsfile)
        self.con.drop_table(self.tablename)

    def test_load_table_hdf5(self):
        print('\n*** test_load_table_hdf5 ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        df.to_hdf(self.h5file, key='data')
        self.assertTrue(os.path.exists(self.h5file))
        self.con.drop_table(os.path.splitext(self.h5file)[0].upper())
        # name from filename
        self.assertTrue(self.con.load_table(self.h5file))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.h5file)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        ## appending
        self.assertTrue(self.con.append_table(self.h5file))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' %
                              os.path.splitext(self.h5file)[0].upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(os.path.splitext(self.h5file)[0].upper())
        # name from tablename
        self.con.drop_table(self.tablename)
        self.assertTrue(self.con.load_table(self.h5file, name=self.tablename))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows)
        ## appending
        self.assertTrue(self.con.append_table(self.h5file,
                                              name=self.tablename))
        cursor = self.con.cursor()
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 2)
        self.con.drop_table(self.tablename)
        os.remove(self.h5file)

    def test_query_and_save(self):
        print('\n*** test_query_and_save ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        cursor = self.con.cursor()
        try:
            self.con.drop_table(self.tablename)
        except:
            pass
        self.assertTrue(self.con.pandas_to_db(df, tablename=self.tablename))
        query = 'select RA,DEC from %s' % self.tablename.upper()
        self.con.query_and_save(query, self.csvfile, print_time=False)
        self.assertTrue(os.path.exists(self.csvfile))
        self.con.query_and_save(query, self.fitsfile, print_time=False)
        self.assertTrue(os.path.exists(self.fitsfile))
        self.con.query_and_save(query, self.h5file, print_time=False)
        self.assertTrue(os.path.exists(self.h5file))
        os.remove(self.csvfile)
        os.remove(self.fitsfile)
        os.remove(self.h5file)
        for i in range(34):
            self.assertTrue(
                self.con.pandas_to_db(df,
                                      tablename=self.tablename,
                                      append=True))
        temp = cursor.execute('select RA,DEC from %s' % self.tablename.upper())
        fetch = temp.fetchall()
        self.assertEqual(len(fetch), self.nrows * 35)
        self.con.outfile_max_mb = 1
        self.con.query_and_save(query, self.csvfile, print_time=False)
        for i in range(4):
            self.assertTrue(
                os.path.exists(
                    os.path.splitext(self.csvfile)[0] + '_00000' + str(i + 1) +
                    '.csv'))
            os.remove(
                os.path.splitext(self.csvfile)[0] + '_00000' + str(i + 1) +
                '.csv')
        self.con.query_and_save(query, self.fitsfile, print_time=False)
        for i in range(4):
            self.assertTrue(
                os.path.exists(
                    os.path.splitext(self.fitsfile)[0] + '_00000' +
                    str(i + 1) + '.fits'))
            os.remove(
                os.path.splitext(self.fitsfile)[0] + '_00000' + str(i + 1) +
                '.fits')

        self.con.outfile_max_mb = 1000
        self.con.drop_table(self.tablename)

    def test_inline_functions(self):
        print('\n*** test_inline_functions ***\n')
        data = create_test_data()
        df = pd.DataFrame(data)
        self.assertEqual(len(df), self.nrows)
        cursor = self.con.cursor()
        try:
            self.con.drop_table(self.tablename)
        except:
            pass
        self.assertTrue(self.con.pandas_to_db(df, tablename=self.tablename))
        query = 'select /*p: Y.my_sum(ra,dec) as testcol*/ from %s' % self.tablename
        self.con.ea_import('wrapped as Y')
        df = self.con.query_to_pandas(query)
        self.assertEqual(len(df), self.nrows)
        self.assertTrue('TESTCOL' in df.columns.values.tolist())
        self.con.drop_table(self.tablename)
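For completeness, a standard entry point to run a test class like the one above:

# Standard unittest entry point for running TestApi above.
import unittest

if __name__ == '__main__':
    unittest.main()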
Example #31
def run(args):
    logname = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    logging.basicConfig(filename='BulkThumbs_' + logname + '_Rank_' +
                        str(rank) + '.log',
                        format='%(asctime)s - %(levelname)-8s - %(message)s',
                        level=logging.INFO)
    logger = logging.getLogger(__name__)
    logger.info('Rank: ' + str(rank) + '\n')

    xs = float(args.xsize)
    ys = float(args.ysize)
    colors = args.colors.split(',')
    usernm = ''
    jobid = ''
    outdir = ''

    if rank == 0:
        start = time.time()
        if args.db == 'DR1':
            db = 'desdr'
        elif args.db == 'Y3A2':
            db = 'dessci'

        logger.info('Selected Options:')

        # This puts any input type into a pandas dataframe
        if args.csv:
            userdf = pd.DataFrame(pd.read_csv(args.csv))
            logger.info('    CSV: ' + args.csv)
        elif args.ra:
            coords = {}
            coords['RA'] = args.ra
            coords['DEC'] = args.dec
            userdf = pd.DataFrame.from_dict(coords, orient='columns')
            logger.info('    RA: ' + str(args.ra))
            logger.info('    DEC: ' + str(args.dec))
        elif args.coadd:
            coadds = {}
            coadds['COADD_OBJECT_ID'] = args.coadd
            userdf = pd.DataFrame.from_dict(coadds, orient='columns')
            logger.info('    CoaddID: ' + str(args.coadd))

        logger.info('    X size: ' + str(args.xsize))
        logger.info('    Y size: ' + str(args.ysize))
        logger.info('    Make TIFFs? ' + str(args.make_tiffs))
        logger.info('    Make PNGs? ' + str(args.make_pngs))
        logger.info('    Make FITS? ' + str(args.make_fits))
        if args.make_fits:
            logger.info('        Bands: ' + args.colors)

        df = pd.DataFrame()
        unmatched_coords = {'RA': [], 'DEC': []}
        unmatched_coadds = []

        logger.info('Connecting to: ' + db)
        conn = ea.connect(db)
        curs = conn.cursor()

        usernm = str(conn.user)
        jobid = str(uuid.uuid4())
        #outdir = usernm + '/' + jobid + '/'
        outdir = OUTDIR + '/' + usernm + '/' + jobid + '/'
        tablename = 'BTL_' + jobid.upper().replace(
            "-", "_")  # "BulkThumbs_List_<jobid>"

        if 'RA' in userdf:
            print(userdf.head())

            if args.db == 'Y3A2':
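                # Upload the user's coordinates as a temporary table and do a
                # single spatial join against the tile-geometry table, instead
                # of issuing one query per row as in the DR1 branch below.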
                ra_adjust = [
                    360 - userdf['RA'][i]
                    if userdf['RA'][i] > 180 else userdf['RA'][i]
                    for i in range(len(userdf['RA']))
                ]
                userdf = userdf.assign(RA_ADJUSTED=ra_adjust)
                userdf.to_csv(tablename + '.csv', index=False)
                conn.load_table(tablename + '.csv', name=tablename)

                query = "select temp.RA, temp.DEC, temp.RA_ADJUSTED, temp.RA as ALPHAWIN_J2000, temp.DEC as DELTAWIN_J2000, m.TILENAME from Y3A2_COADDTILE_GEOM m, {} temp where (m.CROSSRA0='N' and (temp.RA between m.RACMIN and m.RACMAX) and (temp.DEC between m.DECCMIN and m.DECCMAX)) or (m.CROSSRA0='Y' and (temp.RA_ADJUSTED between m.RACMIN-360 and m.RACMAX) and (temp.DEC between m.DECCMIN and m.DECCMAX))".format(
                    tablename)

                df = conn.query_to_pandas(query)
                curs.execute('drop table {}'.format(tablename))
                os.remove(tablename + '.csv')

            if args.db == 'DR1':
                for i in range(len(userdf)):
                    ra = userdf['RA'][i]
                    ra180 = ra
                    if ra > 180:
                        ra180 = 360 - ra

                    if args.db == 'DR1':
                        query = "select * from (select TILENAME from DR1_TILE_INFO where (CROSSRA0='N' and ({0} between RACMIN and RACMAX) and ({1} between DECCMIN and DECCMAX)) or (CROSSRA0='Y' and ({2} between RACMIN-360 and RACMAX) and ({1} between DECCMIN and DECCMAX))) where rownum=1".format(
                            ra, userdf['DEC'][i], ra180)

                    f = conn.query_to_pandas(query)
                    if f.empty:
                        unmatched_coords['RA'].append(userdf['RA'][i])
                        unmatched_coords['DEC'].append(userdf['DEC'][i])
                    else:
                        df = df.append(f)

        if 'COADD_OBJECT_ID' in userdf:
            if args.db == 'Y3A2':
                conn.load_table(args.csv, name=tablename)

                query = "select temp.COADD_OBJECT_ID, m.ALPHAWIN_J2000, m.DELTAWIN_J2000, m.RA, m.DEC, m.TILENAME from Y3A2_COADD_OBJECT_SUMMARY m, {} temp where temp.COADD_OBJECT_ID=m.COADD_OBJECT_ID".format(
                    tablename)

                df = conn.query_to_pandas(query)
                curs.execute('drop table {}'.format(tablename))

            if args.db == 'DR1':
                for i in range(len(userdf)):
                    query = "select COADD_OBJECT_ID, ALPHAWIN_J2000, DELTAWIN_J2000, RA, DEC, TILENAME from DR1_MAIN where COADD_OBJECT_ID={0}".format(
                        userdf['COADD_OBJECT_ID'][i])

                    f = conn.query_to_pandas(query)
                    if f.empty:
                        unmatched_coadds.append(userdf['COADD_OBJECT_ID'][i])
                    else:
                        df = df.append(f)

        conn.close()
        df = df.sort_values(by=['TILENAME'])

        #chunksize = int(df.shape[0] / nprocs) + (df.shape[0] % nprocs)
        #df = [ df[ i:i+chunksize ] for i in range(0, df.shape[0], chunksize) ]
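        # Split the matched rows into nprocs roughly equal chunks so that
        # comm.scatter below can hand one chunk to each MPI rank.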
        df = np.array_split(df, nprocs)

        end = time.time()
        print('Querying took (s): ' + str(end - start))
        print(unmatched_coords)
        print(unmatched_coadds)
        logger.info('Querying took (s): ' + str(end - start))
        if db == 'desdr':
            logger.info('For coords input and DR1 db, \nUnmatched Coords: ' +
                        str(unmatched_coords))
            logger.info('For coadds input and DR1 db, \nUnmatched Coadds: ' +
                        str(unmatched_coadds))

    else:
        df = None

    usernm, jobid, outdir = comm.bcast([usernm, jobid, outdir], root=0)
    #outdir = usernm + '/' + jobid + '/'
    df = comm.scatter(df, root=0)
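    # Each rank now works on its own subset of rows, grouped by tile below.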

    logger.info('JobID: ' + str(jobid))

    tilenm = df['TILENAME'].unique()
    for i in tilenm:
        #tiledir = 'tiles_sample/' + i + '/'
        tiledir = TILES_FOLDER + '/' + i + '/'
        udf = df[df.TILENAME == i]
        udf = udf.reset_index()

        size = u.Quantity((ys, xs), u.arcmin)
        positions = SkyCoord(udf['ALPHAWIN_J2000'],
                             udf['DELTAWIN_J2000'],
                             frame='icrs',
                             unit='deg',
                             equinox='J2000',
                             representation_type='spherical')

        if args.make_tiffs or args.make_pngs:
            MakeTiffCut(tiledir, outdir, positions, xs, ys, udf,
                        args.make_tiffs, args.make_pngs)

        if args.make_fits:
            MakeFitsCut(tiledir, outdir, size, positions, colors, udf)
Exemple #32
0
    def test_connect_dessci(self):
        con = ea.connect('dessci', quiet=True)
        self.assertTrue(con.ping())
Exemple #33
0
    def __init__(self, trigger_id, jsonfilelist, triggerdir, datadir, real, trigger_path):

        #os.system('kinit -k -t /var/keytab/desgw.keytab desgw/des/[email protected]')
        tstart = time.time()

        if real:
            self.backend = FileBackend("./realdb")
        else:
            self.backend = FileBackend("./testdb")
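        # FileBackend/Trigger appear to come from a blitzdb-style document
        # store: get() raises Trigger.DoesNotExist when no record matches.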

        try:
            self.thisevent = self.backend.get(Trigger, {'id': trigger_id})
            print('Found this event in desgw database...')
        except Trigger.DoesNotExist:
            self.thisevent = Trigger({
                'id':trigger_id,
                'jsonfilelist':jsonfilelist,
                'triggerpath':triggerdir,
                'mapspath':datadir,
                'jobids':[
                    (0,'jsonfile_corresponding_to_jobid.json'),
                ],
                'postprocint': 0
            })
            print('Database entry created!')

        self.trigger_id = trigger_id
        self.trigger_path = trigger_path

        self.backend.save(self.thisevent)
        self.backend.commit()

        with open(os.path.join(triggerdir,"strategy.yaml"), "r") as f:
            self.config = yaml.safe_load(f)
        self.filterobslist = np.array(self.config['exposure_filter'],dtype='str')
        self.strategydict = {}
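        # strategydict maps each filter band to the number of exposures
        # requested for it in strategy.yaml.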

        for f in np.unique(self.filterobslist):
            self.strategydict[f] = len(self.filterobslist[self.filterobslist == f])

        self.connection = ea.connect(DATABASE)
        self.cursor = self.connection.cursor()

        self.jsonfilelist = jsonfilelist

        print(self.jsonfilelist)
        # hardjson / hj look like module-level debug overrides defined
        # outside this snippet.
        if hardjson:
            self.jsonfilelist = hj

        #self.pp = subprocess.Popen('echo starting',stdout=PIPE, stderr=PIPE,shell=True)

        self.trigger_id = trigger_id
        self.datadir = datadir
        self.triggerdir = triggerdir
        self.processingdir = os.path.join(self.triggerdir,'PROCESSING')
        if not os.path.exists(self.processingdir):
            os.makedirs(self.processingdir)

        dire = './processing/' + trigger_id + '/'
        if not os.path.exists(dire):
            os.makedirs(dire)

        with open(os.path.join(triggerdir, "strategy.yaml"), "r") as f:
            self.strategy = yaml.safe_load(f)

        with open("jobmanager.yaml", "r") as g:
            self.jmconfig = yaml.safe_load(g)

        # Dump Y1-era and current-epoch exposure metadata to tab files, then
        # concatenate them into a single ./processing/exposures.list file.
        q1 = "select expnum,nite,mjd_obs,telra,teldec,band,exptime,propid,obstype,object from exposure where " \
             "nite>20130828 and nite<20150101 and expnum<300000 and obstype='object' order by expnum"  # y1 images
        self.connection.query_and_save(q1, './processing/exposuresY1.tab')

        q2 = "select expnum,nite,mjd_obs,radeg,decdeg,band,exptime,propid,obstype,object from prod.exposure where " \
             "nite>20150901 and obstype='object' order by expnum"  # y2 and later
        self.connection.query_and_save(q2, './processing/exposuresCurrent.tab')

        os.system('cat ./processing/exposuresY1.tab ./processing/exposuresCurrent.tab > ./processing/exposures.list')

        #self.submit_post_processing()

        self.submit_all_jsons_for_sejobs()  # preps all DES images that already exist
        tfin = time.time()
        print('TOTAL SE JOBS TIME', tfin - tstart)
        #sys.exit()
        self.monitor_images_from_mountain()  # loop that waits for images off the mountain and submits them for processing