def update_status(id, **kwargs):
    with SurveysDB() as sdb:
        r = sdb.get_field(id)
        for k in kwargs:
            r[k] = kwargs[k]
        sdb.set_field(r)
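# A minimal usage sketch for update_status (illustrative only: 'P000+00' is a
# placeholder field id, and the keyword arguments must be existing columns of
# the fields table, e.g. the status/priority columns used elsewhere in these
# scripts):
#
#   update_status('P000+00', status='Running', priority=10)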
print('Now searching for results directories')
cwd = os.getcwd()

# find what we need to put in the mosaic
mosaicpointings, mosseps = find_pointings_to_mosaic(pointingdict, mospointingname)
if ignorepointings != '':
    ignorepointings = ignorepointings.split(',')

maxsep = np.max(mosseps)
# now find whether we have got these pointings somewhere!
mosaicdirs = []
missingpointing = False
scales = []
sdb = SurveysDB()
for p in mosaicpointings:
    if p in ignorepointings:
        continue
    print('Wanting to put pointing %s in mosaic' % p)
    for d in args.directories:
        rd = d + '/' + p
        print(rd)
        if os.path.isfile(rd + '/' + fname):
            mosaicdirs.append(rd)
            try:
                qualitydict = sdb.get_quality(p)
                currentdict = sdb.get_field(p)
                print(qualitydict)
                #scale=qualitydict['scale']
                scale = 1.0 / (qualitydict['nvss_scale'] / 5.9124)
        if 'ddfp-' in jobname:
            jobs[jobname[5:]] = status
    return jobs

def plotcircle(name, ra, dec, color, pcolor='black'):
    circle1 = plt.Circle((ra, dec), csize, color=color, alpha=0.2)
    plt.gcf().gca().add_artist(circle1)
    plt.scatter(ra, dec, color=pcolor)
    plt.text(ra, dec, name)

jobs = qstat()
print jobs
circles = []
s_colours = {'D/L failed': 'black', 'List failed': 'black', 'Downloading': 'red',
             'Downloaded': 'orange', 'Unpacking': 'orange', 'Unpacked': 'orange',
             'Ready': 'yellow', 'Queued': 'blue', 'Running': 'cyan',
             'Complete': 'green', 'Archiving': 'green', 'Archived': 'olive',
             'Stopped': 'magenta', 'Failed': 'magenta', 'DI_started': 'white'}
sdb = SurveysDB(readonly=True)
sdb.cur.execute('select * from fields where status!="Not started"')
fields = sdb.cur.fetchall()
sdb.close()
for f in fields:
    status = f['status']
    shortname = f['id']
    job_status = ''
    d_colour = 'black'
    if status == 'Running' or status == 'Ready' or status == 'Failed':
        if len(jobs) > 0:
            if shortname not in jobs:
                job_status = 'not queued'
                d_colour = 'red'
            elif jobs[shortname] == 'Q':
#!/usr/bin/python
# Ingest the LOTSS file provided by Tim

import sys
from surveys_db import SurveysDB
import requests

root = 'https://lofar-webdav.grid.sara.nl/SKSP/L'
lines = [l.rstrip() for l in open(sys.argv[1]).readlines()]
sdb = SurveysDB()
sdb.cur.execute('delete from fields_new')
sdb.cur.execute('delete from observations')
for l in lines[1:]:
    bits = l.split(',')
    if bits[1] == 'WRONG':
        continue
    field = bits[0]
    ra = float(bits[1])
    dec = float(bits[2])
    time = float(bits[3])
    if bits[4] == 'None':
        ids = []
    else:
        ids = bits[4].split(' ')
        if ids[0] == '':
            ids = ids[1:]
    print time, field, ra, dec, ids
    sdb.cur.execute(
        'insert into fields_new values ( %s,"Not started",%s,%s,NULL,NULL,NULL,NULL,0)',
os.system('ssh lofar.herts.ac.uk "rm -rf /beegfs/lofar/lba/calibration_solutions/%s"' % cal)
os.system('ssh lofar.herts.ac.uk "mkdir /beegfs/lofar/lba/calibration_solutions/%s"' % cal)
os.system('scp -q cal-pa.h5 cal-amp.h5 cal-iono.h5 lofar.herts.ac.uk:/beegfs/lofar/lba/calibration_solutions/%s' % cal)
os.system('scp -q -r plots* lofar.herts.ac.uk:/beegfs/lofar/lba/calibration_solutions/%s' % cal)

# update the db
from surveys_db import SurveysDB
with SurveysDB(survey='lba', readonly=False) as sdb:
    sdb.execute('INSERT INTO observations (id,calibratordata) VALUES (%i,"%s")' % (obsid, cal))

w.done('upload')
### DONE

# a debug image
if imaging:
    logger.info("Imaging section:")
    if iono3rd:
        MSs = lib_ms.AllMSs(sorted(glob.glob('./*MS'))[int(len(glob.glob('./*MS')) / 2.):], s,
t = Table.read('/home/mjh/pipeline-master/ddf-pipeline/misc/DR2-pointings.txt', format='ascii')
colnames = ['Field', 'RA', 'Dec']
for i, c in enumerate(t.colnames):
    t[c].name = colnames[i]

fields = ['0h', '8h', '13h']
t0h = t[(t['RA'] < 50) | (t['RA'] > 300)]
t8h = t[(t['RA'] < 140) & (t['RA'] > 90)]
t13h = t[(t['RA'] > 140) & (t['RA'] < 250)]
ft = [t0h, t8h, t13h]
for field, t in zip(fields, ft):
    print field, len(t)

with SurveysDB() as sdb:
    sdb.cur.execute('select fields.id,fields.ra,fields.decl,fields.status,quality.* from fields left join quality on fields.id=quality.id order by fields.id')
    results = sdb.cur.fetchall()

td = {}
for r in results:
    td[r['id']] = r

for field, t in zip(fields, ft):
    print field, len(t)
    complete = 0
    incomplete = 0
    incomplete_fields = []
    rmsmean = []
    for r in t:
        name = r['Field']
args = parser.parse_args()

mospointingname = args.mospointingname
pointingdict = read_pointingfile()

print 'Now searching for results directories'
cwd = os.getcwd()

# find what we need to put in the mosaic
mosaicpointings, mosseps = find_pointings_to_mosaic(pointingdict, mospointingname)
maxsep = np.max(mosseps)
# now find whether we have got these pointings somewhere!
mosaicdirs = []
missingpointing = False
sdb = SurveysDB()
for p in mosaicpointings:
    print 'Wanting to put pointing %s in mosaic' % p
    currentdict = sdb.get_field(p)
    for d in args.directories:
        rd = d + '/' + p
        print rd
        if os.path.isfile(rd + '/image_full_ampphase_di_m.NS_shift.int.facetRestored.fits'):
            mosaicdirs.append(rd)
            break
    else:
        print 'Pointing', p, 'not found'
from astropy.table import Table
import glob
import os
import numpy as np
from surveys_db import SurveysDB

os.chdir('/data/lofar/DR2/fields')
g = glob.glob('*')
a1l = []
a2l = []
sdb = SurveysDB()
for d in g:
    infile = d + '/image_full_ampphase_di_m.NS.cat.fits_NVSS_match.fits'
    if os.path.isfile(infile):
        t = Table.read(infile)
        t = t[t['Total_flux'] > 0.01]
        l1 = len(t)
        #alpha1=np.median(np.log(t['Total_flux']/t['NVSS_Total_flux'])/np.log(1400/144.0))
        alpha1 = np.median(t['Total_flux'] / t['NVSS_Total_flux'])
        t = t[t['Total_flux'] > 0.03]
        l2 = len(t)
        #alpha2=np.median(np.log(t['Total_flux']/t['NVSS_Total_flux'])/np.log(1400/144.0))
        alpha2 = np.median(t['Total_flux'] / t['NVSS_Total_flux'])
        print d, alpha1, alpha2, l1, l2
        a1l.append(alpha1)
        a2l.append(alpha2)
        r = sdb.get_quality(d)
        r['nvss_scale'] = alpha2
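# Downstream scripts in this collection (the mosaic and catalogue-scaling
# snippets) turn the nvss_scale stored above into a multiplicative flux
# correction. A minimal sketch of that conversion, assuming a quality record
# r like the one fetched above; 5.9124 appears to act as the reference
# LoTSS/NVSS flux ratio, but that interpretation is an assumption:
#
#   scale = 1.0 / (r['nvss_scale'] / 5.9124)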
RA = ravals
Dec = decvals
org = 180
x = np.remainder(RA + 360 - org, 360)  # shift RA values
ind = x > 180
x[ind] -= 360  # scale conversion to [-180, 180]
x = -x  # reverse the scale: East to the left
ax.scatter(np.radians(x), np.radians(Dec), s=10, color='r', zorder=1, alpha=1.0)  # convert degrees to radians

sdb = SurveysDB()
ravals = []
decvals = []
for i in range(0, len(identities)):
    obsdict = sdb.get_observation(identities[i])
    status = obsdict['status']
    if status == 'DI_processed' or status == 'DI_Processed':
        fielddict = sdb.get_field(identities_fields[i])
        ravals.append(fielddict['ra'])
        decvals.append(fielddict['decl'])
        progressdict[identities_fields[i]][2] = 'partly processed'
sdb.close()
ravals = np.array(ravals)
decvals = np.array(decvals)
# ingest file from Francesco
from astropy.table import Table
from surveys_db import SurveysDB

t = Table.read('/beegfs/lofar/mjh/lba/allsky-grid.fits')
with SurveysDB(survey='lba') as sdb:
    for r in t:
        f = sdb.create_field(r['name'])
        f['ra'] = r['radeg']
        f['decl'] = r['decdeg']
        f['gal_l'] = r['GAL_LONG']
        f['gal_b'] = r['GAL_LAT']
        f['status'] = 'Not started'
        f['lotss_field'] = 1
        sdb.set_field(f)
if fieldname == 'LoTSS-DR2':
    print lotssdr2['Mosaic_ID'][0]
else:
    if nodatabase:
        print 'Scaling factor being used for field catalogue', fieldfactor
        # factor of 1000 converts to mJy, the units of the LoTSS-DR2 catalogue
        lotssdr2['Peak_flux'] = lotssdr2['Peak_flux'] * fieldfactor * 1000.0
        lotssdr2['Total_flux'] = lotssdr2['Total_flux'] * fieldfactor * 1000.0
        lotssdr2['E_Peak_flux'] = lotssdr2['E_Peak_flux'] * fieldfactor * 1000.0
        lotssdr2['E_Total_flux'] = lotssdr2['E_Total_flux'] * fieldfactor * 1000.0
    else:
        from surveys_db import SurveysDB
        sdb = SurveysDB()
        qualitydict = sdb.get_quality(fieldname)
        sdb.close()
        nvssfactor = 1.0 / (qualitydict['nvss_scale'] / 5.9124)
        tgssscale = qualitydict['tgss_scale']
        print 'Scaling comparison catalogue by nvssfactor', nvssfactor
        # factor of 1000 converts to mJy, the units of the LoTSS-DR2 catalogue
        lotssdr2['Peak_flux'] = lotssdr2['Peak_flux'] * nvssfactor * 1000.0
        lotssdr2['Total_flux'] = lotssdr2['Total_flux'] * nvssfactor * 1000.0
        lotssdr2['E_Peak_flux'] = lotssdr2['E_Peak_flux'] * nvssfactor * 1000.0
        lotssdr2['E_Total_flux'] = lotssdr2['E_Total_flux'] * nvssfactor * 1000.0

#lotssdr2=select_isolated_sources(lotssdr2,45)
#!/usr/bin/python
# insert a field and priority into the list for processing

import sys
from surveys_db import SurveysDB

name = sys.argv[1]
if name[0] == 'L':
    name = name[1:]
id = int(name)
priority = int(sys.argv[2])

sdb = SurveysDB()
idd = sdb.get_id(id)
if idd is not None:
    print 'Field already exists in the database! (Status is %s)' % idd['status']
else:
    idd = sdb.create_id(id)
    idd['status'] = 'Preprocessed'
    idd['priority'] = priority
    sdb.set_id(idd)
sdb.close()
    'List failed': 'black',
    'Downloading': 'red',
    'Downloaded': 'orange',
    'Unpacking': 'orange',
    'Unpacked': 'orange',
    'Ready': 'yellow',
    'Queued': 'blue',
    'Running': 'cyan',
    'Complete': 'green',
    'Archiving': 'green',
    'Archived': 'olive',
    'Stopped': 'magenta',
    'Failed': 'magenta',
    'DI_started': 'white'
}

sdb = SurveysDB(readonly=True)
sdb.cur.execute('select * from fields where status!="Not started"')
fields = sdb.cur.fetchall()
sdb.close()

for f in fields:
    status = f['status']
    shortname = f['id']
    job_status = ''
    d_colour = 'black'
    if status == 'Running' or status == 'Ready' or status == 'Failed':
        if len(jobs) > 0:
            if shortname not in jobs:
                job_status = 'not queued'
                d_colour = 'red'
            elif jobs[shortname] == 'Q':
def __init__(self, ListMSName, ColName="DATA", ModelName="PREDICT_KMS",
             UVRange=[1., 1000.], SolsName=None, FileCoords=None,
             Radius=3., NOff=-1, Image=None, SolsDir=None, NCPU=1):
    self.ListMSName = sorted(ListMSName)  #[0:2]
    self.nMS = len(self.ListMSName)
    self.ColName = ColName
    self.ModelName = ModelName
    self.OutName = self.ListMSName[0].split("/")[-1].split("_")[0]
    self.UVRange = UVRange
    self.ReadMSInfos()
    self.Radius = Radius
    self.Image = Image
    self.SolsDir = SolsDir
    #self.PosArray=np.genfromtxt(FileCoords,dtype=[('Name','S200'),("ra",np.float64),("dec",np.float64),('Type','S200')],delimiter="\t")

    # identify version in logs
    print >> log, "DynSpecMS version %s starting up" % version()

    # should we use the surveys DB?
    if 'DDF_PIPELINE_DATABASE' in os.environ:
        print >> log, "Using the surveys database"
        from surveys_db import SurveysDB
        with SurveysDB() as sdb:
            sdb.cur.execute('select * from transients')
            result = sdb.cur.fetchall()
        # convert to a list, then to ndarray, then to recarray
        l = []
        for r in result:
            l.append((r['id'], r['ra'], r['decl'], r['type']))
        self.PosArray = np.asarray(l, dtype=[('Name', 'S200'), ("ra", np.float64),
                                             ("dec", np.float64), ('Type', 'S200')])
        print >> log, "Created an array with %i records" % len(result)
    else:
        if FileCoords is None:
            FileCoords = "Transient_LOTTS.csv"
            if not os.path.isfile(FileCoords):
                ssExec = "wget -q --user=anonymous ftp://ftp.strw.leidenuniv.nl/pub/tasse/%s -O %s" % (FileCoords, FileCoords)
                print >> log, "Downloading %s" % FileCoords
                print >> log, "  Executing: %s" % ssExec
                os.system(ssExec)
        self.PosArray = np.genfromtxt(FileCoords,
                                      dtype=[('Name', 'S200'), ("ra", np.float64),
                                             ("dec", np.float64), ('Type', 'S200')],
                                      delimiter=",")[()]

    self.PosArray = self.PosArray.view(np.recarray)
    self.PosArray.ra *= np.pi / 180.
    self.PosArray.dec *= np.pi / 180.
    NOrig = self.PosArray.shape[0]
    Dist = AngDist(self.ra0, self.PosArray.ra, self.dec0, self.PosArray.dec)
    ind = np.where(Dist < Radius * np.pi / 180)[0]
    self.PosArray = self.PosArray[ind]
    self.NDirSelected = self.PosArray.shape[0]
    print >> log, "Selected %i target [out of the %i in the original list]" % (self.NDirSelected, NOrig)

    if self.NDirSelected == 0:
        print >> log, ModColor.Str("  Have found no sources - returning")
        self.killWorkers()
        return

    if NOff == -1:
        NOff = self.PosArray.shape[0] * 2
    if NOff is not None:
        print >> log, "Including %i off targets" % (NOff)
        self.PosArray = np.concatenate([self.PosArray, self.GiveOffPosArray(NOff)])
        self.PosArray = self.PosArray.view(np.recarray)
    self.NDir = self.PosArray.shape[0]
    print >> log, "For a total of %i targets" % (self.NDir)

    self.DicoDATA = shared_dict.create("DATA")
    self.DicoGrids = shared_dict.create("Grids")
    self.DicoGrids["GridLinPol"] = np.zeros((self.NDir, self.NChan, self.NTimes, 4), np.complex128)
    self.DicoGrids["GridWeight"] = np.zeros((self.NDir, self.NChan, self.NTimes, 4), np.complex128)

    self.SolsName = SolsName
    self.DoJonesCorr = False
    if self.SolsName:
        self.DoJonesCorr = True
        self.DicoJones = shared_dict.create("DicoJones")

    APP.registerJobHandlers(self)
    AsyncProcessPool.init(ncpu=NCPU, affinity=0)
    APP.startWorkers()
    r_in = []
    r_out = []
    for f in r:
        if sf(f):
            ra.append(f['ra'])
            dec.append(f['decl'])
            r_in.append(f)
        else:
            r_out.append(f)
    ra_r, dec_r = cc(ra, dec)
    plt.scatter(ra_r, dec_r, label=label, **kwargs)
    print "%-20s : %i" % (label, len(r_in))
    return r_in, r_out

with SurveysDB(readonly=True) as sdb:
    sdb.cur.execute('select fields.id as id,gal_l as ra,gal_b as decl,fields.status as status,observations.status as ostatus,observations.location as location,sum(nsb*integration/232) as s,count(observations.id) as c,fields.priority from fields left join observations on (observations.field=fields.id) group by fields.id having ostatus is not null')
    #sdb.cur.execute('select * from fields where status!="Not started"')
    results = sdb.cur.fetchall()

print len(results), 'fields have some observations'

fig = plt.figure(figsize=(16, 8))
fig.add_subplot(111, projection='aitoff')

# GP
for b in [-10, 0, 10]:
    lon = np.linspace(-180, 180, 1000)
    lat = b * np.ones_like(lon)
#!/usr/bin/env python
# insert a field and priority into the list for processing

from __future__ import print_function
import sys
from surveys_db import SurveysDB

name = sys.argv[1]
if name[0] == 'L':
    name = name[1:]
id = int(name)
priority = int(sys.argv[2])

sdb = SurveysDB()
idd = sdb.get_id(id)
if idd is not None:
    print('Field already exists in the database! (Status is %s)' % idd['status'])
else:
    idd = sdb.create_id(id)
    idd['status'] = 'Preprocessed'
    idd['priority'] = priority
    sdb.set_id(idd)
sdb.close()
def make_custom_config(name, workdir, do_field, averaged=False):
    if do_field:
        with SurveysDB() as sdb:
            idd = sdb.get_field(name)
            if idd['gal_b'] is None:
                print('Missing Galactic co-ordinates, adding them')
                add_galcoords(sdb, [idd])
                idd = sdb.get_field(name)
        no_wenss = ((idd['decl'] < 32) | (idd['decl'] > 72))
        no_tgss = (idd['no_tgss'] == 1)
        if idd['lotss_field'] > 0:
            lotss_field = True
            do_polcubes = True
            do_dynspec = True
            do_stokesv = True
            do_spectral_restored = True
        else:
            lotss_field = False
            do_polcubes = (idd['do_polcubes'] > 0)
            do_dynspec = (idd['do_dynspec'] > 0)
            do_stokesv = (idd['do_stokesv'] > 0)
            do_spectral_restored = (idd['do_spectral_restored'] > 0)
    else:
        # assume LOTSS defaults
        no_wenss = False
        no_tgss = False
        lotss_field = True

    if np.abs(idd['gal_b']) < 10.0:
        template = os.environ['DDF_DIR'] + '/ddf-pipeline/examples/tier1-jul2018-MW.cfg'
    else:
        if no_wenss:
            if idd['decl'] < 10.0:
                template = os.environ['DDF_DIR'] + '/ddf-pipeline/examples/tier1-jul2018-lowdec.cfg'
            else:
                template = os.environ['DDF_DIR'] + '/ddf-pipeline/examples/tier1-jul2018-NVSS.cfg'
        else:
            template = os.environ['DDF_DIR'] + '/ddf-pipeline/examples/tier1-jul2018.cfg'

    lines = open(template).readlines()
    outfile = open(workdir + '/tier1-config.cfg', 'w')
    for l in lines:
        if 'colname' in l and averaged:
            outfile.write('colname=DATA\n')
        elif '[control]' in l and no_tgss:
            outfile.write(l + 'redo_DI=True\n')
        elif 'do_dynspec' in l and not do_dynspec:
            outfile.write('do_dynspec=False\n')
        elif 'spectral_restored' in l and not do_spectral_restored:
            outfile.write('spectral_restored=False\n')
        elif 'polcubes' in l and 'compress' not in l and not do_polcubes:
            outfile.write('polcubes=False\n')
        elif 'stokesv' in l and not do_stokesv:
            outfile.write('stokesv=False\n')
        else:
            outfile.write(l)
def InitFromCatalog(self):
    FileCoords = self.FileCoords
    dtype = [('Name', 'S200'), ("ra", np.float64), ("dec", np.float64), ('Type', 'S200')]

    # should we use the surveys DB?
    if 'DDF_PIPELINE_DATABASE' in os.environ:
        print("Using the surveys database", file=log)
        from surveys_db import SurveysDB
        with SurveysDB() as sdb:
            sdb.cur.execute('select * from transients')
            result = sdb.cur.fetchall()
        # convert to a list, then to ndarray, then to recarray
        l = []
        for r in result:
            l.append((r['id'], r['ra'], r['decl'], r['type']))
        if FileCoords is not None:
            print('Adding data from file ' + FileCoords, file=log)
            additional = np.genfromtxt(FileCoords, dtype=dtype, delimiter=",")[()]
            if not additional.shape:
                # deal with a one-line input file
                additional = np.array([additional], dtype=dtype)
            for r in additional:
                l.append(tuple(r))
        self.PosArray = np.asarray(l, dtype=dtype)
        print("Created an array with %i records" % len(result), file=log)
    else:
        #FileCoords="Transient_LOTTS.csv"
        if FileCoords is None:
            if not os.path.isfile(FileCoords):
                ssExec = "wget -q --user=anonymous ftp://ftp.strw.leidenuniv.nl/pub/tasse/%s -O %s" % (FileCoords, FileCoords)
                print("Downloading %s" % FileCoords, file=log)
                print("  Executing: %s" % ssExec, file=log)
                os.system(ssExec)
        log.print("Reading csv file: %s" % FileCoords)
        #self.PosArray=np.genfromtxt(FileCoords,dtype=dtype,delimiter=",")[()]
        self.PosArray = np.genfromtxt(FileCoords, dtype=dtype, delimiter=",")

    self.PosArray = self.PosArray.view(np.recarray)
    self.PosArray.ra *= np.pi / 180.
    self.PosArray.dec *= np.pi / 180.

    Radius = self.Radius
    NOrig = self.PosArray.Name.shape[0]
    Dist = AngDist(self.ra0, self.PosArray.ra, self.dec0, self.PosArray.dec)
    ind = np.where(Dist < (Radius * np.pi / 180))[0]
    self.PosArray = self.PosArray[ind]
    self.NDirSelected = self.PosArray.shape[0]
    print("Selected %i target [out of the %i in the original list]" % (self.NDirSelected, NOrig), file=log)

    if self.NDirSelected == 0:
        print(ModColor.Str("  Have found no sources - returning"), file=log)
        self.killWorkers()
        return

    NOff = self.NOff
    if NOff == -1:
        NOff = self.PosArray.shape[0] * 2
    if NOff is not None:
        print("Including %i off targets" % (NOff), file=log)
        self.PosArray = np.concatenate([self.PosArray, self.GiveOffPosArray(NOff)])
        self.PosArray = self.PosArray.view(np.recarray)
    self.NDir = self.PosArray.shape[0]
    print("For a total of %i targets" % (self.NDir), file=log)

    self.DicoDATA = shared_dict.create("DATA")
    self.DicoGrids = shared_dict.create("Grids")
    self.DicoGrids["GridLinPol"] = np.zeros((self.NDir, self.NChan, self.NTimes, 4), np.complex128)
    self.DicoGrids["GridWeight"] = np.zeros((self.NDir, self.NChan, self.NTimes, 4), np.complex128)

    self.DoJonesCorr_kMS = False
    self.DicoJones = None
    if self.SolsName:
        self.DoJonesCorr_kMS = True
        self.DicoJones_kMS = shared_dict.create("DicoJones_kMS")

    self.DoJonesCorr_Beam = False
    if self.BeamModel:
        self.DoJonesCorr_Beam = True
        self.DicoJones_Beam = shared_dict.create("DicoJones_Beam")

    APP.registerJobHandlers(self)
    AsyncProcessPool.init(ncpu=self.NCPU, affinity=0)
    APP.startWorkers()
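# For reference, a FileCoords row compatible with the genfromtxt calls above
# would look like the (hypothetical) line below: Name,ra,dec,Type, comma
# delimited, with ra/dec in degrees (they are converted to radians just after
# the catalogue is read):
#
#   SRC1,180.0,45.0,Transient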
args = parser.parse_args()

mospointingname = args.mospointingname
pointingdict = read_pointingfile()

print 'Now searching for results directories'
cwd = os.getcwd()

# find what we need to put in the mosaic
mosaicpointings, mosseps = find_pointings_to_mosaic(pointingdict, mospointingname)
maxsep = np.max(mosseps)
# now find whether we have got these pointings somewhere!
mosaicdirs = []
missingpointing = False
sdb = SurveysDB()
for p in mosaicpointings:
    print 'Wanting to put pointing %s in mosaic' % p
    currentdict = sdb.get_field(p)
    for d in args.directories:
        rd = d + '/' + p
        print rd
        if os.path.isfile(rd + '/image_full_ampphase_di_m.NS_shift.int.facetRestored.fits'):
            mosaicdirs.append(rd)
            break
    else:
        print 'Pointing', p, 'not found'
        missingpointing = True
    if not missingpointing and (currentdict['status'] != 'Archived' or currentdict['archive_version'] != 4):
        print 'Pointing', p, 'not archived with archive_version 4'
def __init__(self):
    with SurveysDB(readonly=True) as sdb:
        sdb.cur.execute('select * from fields left join quality on fields.id=quality.id order by fields.id')
        results = sdb.cur.fetchall()
    self.t = table_from_dict_list(results)
    self.t['sc'] = SkyCoord(self.t['ra'], self.t['decl'], unit=u.deg)
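# A minimal usage sketch of the table built above (assumptions: 'obj' is an
# instance of the enclosing class, and astropy's SkyCoord/units are imported
# as in this fragment; the position and search radius are placeholders):
#
#   pos = SkyCoord(180.0, 45.0, unit=u.deg)
#   sep = pos.separation(obj.t['sc'])
#   nearby = obj.t[sep < 2 * u.deg]   # fields within 2 deg of pos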