# Assumed module-level context (imported or defined elsewhere in this module):
# the fdb and udb_util helper modules; os, shutil and sys; numpy's array and
# extract functions; the miriad-python TaskUVAver task wrapper; the
# udb_init_fdbfiles routine; and the udbdir, idbdir and idbfinaldir directory
# strings.


def udb_process1scan(ifb_1, reprocess=False):
    '''Given the set of ifb entries, process them into a single UDB
    Miriad dataset for this scan'''
    n1 = len(ifb_1)
    filelist = fdb.fdb_list_fileid(ifb_1)
    print filelist
    #Next, name the output files, this should be based on the start
    #time of the first file, changed from using scan_id, 2014-06-17,
    #jmm
    scan_id = ifb_1[0].scanid
    source_id = ifb_1[0].sourceid
    project_id = ifb_1[0].projectid
    st_ts = ifb_1[0].st_ts
    en_ts = ifb_1[n1 - 1].en_ts
    #Take care of two digit year here
    # ufileid = 'UDB'+'20'+scan_id
    ufileid = 'U' + ifb_1[0].fileid[1:]
    yyyy = ufileid[3:7]
    ufilename = udbdir + yyyy + '/' + ufileid
    ufb = fdb.pfiledb(ufileid, scan_id, source_id, project_id, st_ts, en_ts, 0)
    # check for reprocess
    if reprocess:
        idb_datadir = idbdir
    else:
        idb_datadir = idbfinaldir + yyyy + '/'
    #endelse
    # Call udbfile_create for this filelist
    filelist_full = [idb_datadir + f for f in filelist]
    if n1 > 50:
        print "UDB_PROCESS1SCAN: Too many Files N1 = ", n1
        print "UDB_PROCESS1SCAN: Reset to 50"
        n1 = 50
        filelist_full = filelist_full[0:n1]  # keep the first 50 files
    #endif
    #If the file exists, you need to delete it
    if os.path.isdir(ufilename):
        print "UDB_PROCESS1SCAN: dataset: ", ufilename, " will be deleted"
        shutil.rmtree(ufilename)
    #endif
    ufile_out = udb_util.udbfile_create(filelist_full, ufilename)
    if len(ufile_out) == 0:
        print "UDB_PROCESS1SCAN: Error creating: ", ufilename
        return []
    else:
        return [ufb]
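
# Sketch (hypothetical helper, not called by the pipeline code here): derive
# the UDB fileid and output path from an IDB fileid of the usual
# 'IDByyyymmddhhmmss' form, mirroring the naming logic in udb_process1scan
# above. The example fileid in the docstring is taken from the TaskUVAver
# comments further down; udbdir_root stands in for the module-level udbdir
# string.
def udb_name_example(fileid, udbdir_root):
    '''e.g. fileid = 'IDB20131220232416' returns ('UDB20131220232416',
    udbdir_root + '2013/UDB20131220232416')'''
    ufileid = 'U' + fileid[1:]    # 'IDB...' -> 'UDB...'
    yyyy = ufileid[3:7]           # four-digit year embedded in the fileid
    ufilename = udbdir_root + yyyy + '/' + ufileid
    return ufileid, ufilename
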
def udb_process1scan(ifb_1, reprocess=False):
    '''Given the set of ifb entries, process them into a single UDB
    Miriad dataset for this scan'''
    n1 = len(ifb_1)
    filelist = fdb.fdb_list_fileid(ifb_1)
    print filelist
    sys.stdout.flush()
    #Next, name the output files, this should be based on the start
    #time of the first file, changed from using scan_id, 2014-06-17,
    #jmm
    scan_id = ifb_1[0].scanid
    source_id = ifb_1[0].sourceid
    project_id = ifb_1[0].projectid
    st_ts = ifb_1[0].st_ts
    en_ts = ifb_1[n1 - 1].en_ts
    #Take care of two digit year here
    # ufileid = 'UDB'+'20'+scan_id
    ufileid = 'U' + ifb_1[0].fileid[1:]
    yyyy = ufileid[3:7]
    ufilename = udbdir + yyyy + '/' + ufileid
    ufb = fdb.pfiledb(ufileid, scan_id, source_id, project_id, st_ts, en_ts, 0)
    # Call udbfile_create for this filelist
    filelist_full = []
    for j in range(len(filelist)):
        #do not process files with pstatus = 666
        if ifb_1[j].pstatus != 666:
            if reprocess:
                filelistj = filelist[j]
                #yyyymmdd from the 'IDByyyymmddhhmmss' fileid
                yyyymmdd = filelistj[len(filelistj) - 14:len(filelistj) - 6]
                filelist_full.append(idbfinaldir + yyyymmdd + '/' + filelist[j])
            else:
                filelist_full.append(idbdir + filelist[j])
            #endelse
        #endif
    #endfor
    n1 = len(filelist_full)
    if n1 == 0:
        print 'UDB_PROCESS1SCAN: No good files to process'
        sys.stdout.flush()
        return [], []
    #endif
    if n1 > 50:
        print "UDB_PROCESS1SCAN: Too many Files N1 = ", n1
        print "UDB_PROCESS1SCAN: Reset to 50"
        sys.stdout.flush()
        n1 = 50
        filelist_full = filelist_full[0:n1]  # keep the first 50 files
    #endif
    #If the file exists, you need to delete it
    if os.path.isdir(ufilename):
        print "UDB_PROCESS1SCAN: dataset: ", ufilename, " will be deleted"
        sys.stdout.flush()
        shutil.rmtree(ufilename)
    #endif
    ufile_out, bad_filename = udb_util.udbfile_create(filelist_full, ufilename)
    if len(bad_filename) > 0:
        print type(ufile_out)
        print "UDB_PROCESS1SCAN: Error creating: ", ufilename
        print "UDB_PROCESS1SCAN: Bad Filename: ", bad_filename
        sys.stdout.flush()
        #strip the path out of the bad_filename here, keeping the
        #17-character IDByyyymmddhhmmss fileid
        bbb = bad_filename[len(bad_filename) - 17:]
        return ufile_out, bbb
    #endif
    if len(ufile_out) == 0:
        print "UDB_PROCESS1SCAN: Error creating: ", ufilename
        sys.stdout.flush()
        return [], []
    else:
        return [ufb], []
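
# Usage sketch (illustrative only, not part of the pipeline above): the
# version of udb_process1scan directly above returns a two-element tuple, so
# callers unpack both values: a list holding the new pfiledb entry (or the
# ufile_out value when a bad input file was hit) and the pathless fileid of
# any bad input file. The ifb_for_scan name below is a hypothetical list of
# pfiledb entries that all share one scanid.
#
#   ufb_list, bad_file = udb_process1scan(ifb_for_scan)
#   if len(bad_file) > 0:
#       print 'bad input file: ', bad_file
#   elif len(ufb_list) > 0:
#       print 'created UDB dataset for: ', ufb_list[0].fileid
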
def udb_fb2process(ndays=5, day0=None):
    '''Reads in the last N days of FDB files, and IFDB files, finds the
    IDB files that need processing, and returns the fdb and ifdb classes
    for those files'''
    #get FDB, IFDB filenames
    fdbfiles = udb_init_fdbfiles(ndays=ndays, day0=day0)
    if len(fdbfiles) == 0:
        print 'UDB_FB2PROCESS Error: No FDB files'
        sys.stdout.flush()
        return [], []
    #endif
    ifdbfiles = udb_init_fdbfiles(ndays=ndays, fdbtype='IFDB', day0=day0)
    if len(ifdbfiles) == 0:
        # Not necessarily an error
        print 'UDB_FB2PROCESS: No IFDB files'
        sys.stdout.flush()
    #endif
    #read in the files
    fb = []
    for j in range(len(fdbfiles)):
        fbj = fdb.fdb_read(fdbfiles[j])
        if len(fbj) > 0:
            fb = fb + fbj
        #endif
    #endfor
    ifb = []
    if len(ifdbfiles) > 0:
        for j in range(len(ifdbfiles)):
            ifbj = fdb.pfdb_read(ifdbfiles[j])
            if len(ifbj) > 0:
                ifb = ifb + ifbj
            #endif
        #endfor
    #endif
    #check for non-empty fb's first
    if len(fb) == 0:
        print "no data to process"
        sys.stdout.flush()
        return fb, []
    #endif
    #if I am here, I have IDB files from the FDB. Do I have PFDB entries?
    if len(ifb) == 0:
        print "no IFDB entries, process all FDB entries"
        sys.stdout.flush()
        #For each FDB entry, create an IFDB entry, with pstatus = 0
        for j in range(len(fb)):
            ifbj = fdb.pfiledb(fb[j].fileid, fb[j].scanid, fb[j].sourceid,
                               fb[j].projectid, fb[j].st_ts, fb[j].en_ts, 0)
            ifb.append(ifbj)
        #endfor
        return fb, ifb
    #endif
    #if I am here, I have both FDB and IFDB entries; for each FDB entry,
    #check to see if there is a corresponding IFDB entry. If not, create
    #one with pstatus = 0
    iflist = fdb.fdb_list_fileid(ifb)
    iidarray = array(iflist)
    ifb_out = []
    for j in range(len(fb)):
        fileidj = fb[j].fileid
        ifbtemp = extract(iidarray == fileidj, ifb)
        if len(ifbtemp) > 0:
            ifb_outj = ifbtemp[0]
        else:
            ifb_outj = fdb.pfiledb(fb[j].fileid, fb[j].scanid, fb[j].sourceid,
                                   fb[j].projectid, fb[j].st_ts, fb[j].en_ts, 0)
        #endif
        ifb_out.append(ifb_outj)
    #endfor
    return fb, ifb_out
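
# Sketch of a possible driver (hypothetical, not part of this module): group
# the IFDB entries returned by udb_fb2process() by scanid and hand each group
# of unprocessed entries to the two-value-return version of udb_process1scan()
# defined earlier; pstatus == 0 is assumed here to mean "not yet processed",
# as set by the pfiledb calls above.
def udb_process_scans_example(ndays=5, day0=None):
    fb, ifb = udb_fb2process(ndays=ndays, day0=day0)
    if len(fb) == 0 or len(ifb) == 0:
        return []
    ufb_all = []
    scan_ids = []
    for entry in ifb:
        if entry.scanid not in scan_ids:
            scan_ids.append(entry.scanid)  # keep scans in order of appearance
    for sid in scan_ids:
        ifb_1 = [e for e in ifb if e.scanid == sid and e.pstatus == 0]
        if len(ifb_1) > 0:
            ufb_list, bad_file = udb_process1scan(ifb_1)
            if len(bad_file) == 0:
                ufb_all = ufb_all + ufb_list
    return ufb_all
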
def udb_process1scan(ifb_1):
    '''Given the set of ifb entries, process them into a single UDB
    Miriad dataset for this scan'''
    # defines a varplt class for miriad python:
    #   class TaskVarplt(TaskBase):
    #       _keywords = ['vis', 'device', 'log', 'xaxis', 'yaxis', 'nxy',
    #                    'xrange', 'yrange']
    #       _options = ['dtime', 'compress', 'overlay', 'unwrap']
    # a call to the varplt task:
    #   TaskVarplt(vis='temp_pytest', xaxis='ut', yaxis='ytsys', device='/xs',
    #              xrange='0.975,0.977', options='overlay').run()
    # TaskUVAver is the class we'll call here: vis is a set of files, out is
    # the output file, line='channel,50,1,10,10' selects 50 channels, starting
    # with channel 1, summing over 10 channels, then skipping to every 10th
    # channel, interval='0.066667' specifies 4 second intervals
    #   TaskUVAver(vis='/dppdata1/IDB/IDB20131220232416,/dppdata1/IDB/IDB20131220232516',
    #              out='temp_pytest1', line='channel,50,1,10,10',
    #              interval='0.066667').run()
    # switched to 1 second intervals, jmm, 2014-06-17
    #first get the list of input files
    n1 = len(ifb_1)
    filelist = fdb.fdb_list_fileid(ifb_1)
    #Next, name the output files, this should be based on the start
    #time of the first file, changed from using scan_id, 2014-06-17,
    #jmm
    scan_id = ifb_1[0].scanid
    source_id = ifb_1[0].sourceid
    st_ts = ifb_1[0].st_ts
    en_ts = ifb_1[n1 - 1].en_ts
    #Take care of two digit year here
    # ufileid = 'UDB'+'20'+scan_id
    ufileid = 'U' + ifb_1[0].fileid[1:]
    yyyy = ufileid[3:7]
    ufilename = udbdir + yyyy + '/' + ufileid
    ufb = fdb.pfiledb(ufileid, scan_id, source_id, st_ts, en_ts, 0)
    #call TaskUVAver on the file list, which needs to be a
    #comma-separated string
    filelist_str = idbdir + filelist[0]
    if n1 > 1:
        #just for testing
        if n1 > 5:
            n1 = 5
        #end of just for testing
        for j in range(n1 - 1):
            filelist_str = filelist_str + ',' + idbdir + filelist[j + 1]
    #If the file exists, you need to delete it before uvavering
    if os.path.isdir(ufilename):
        print "dataset: ", ufilename, " will be deleted"
        shutil.rmtree(ufilename)
    #Dropped 4 second averaging, 2014-06-17, jmm
    TaskUVAver(vis=filelist_str, out=ufilename,
               line='channel,50,1,10,10').run()
    return [ufb]