def convert_acistemp_into_c():
    """
    convert all acistemp fits files in K into that of C
    input:  none, but read from <data_dir>/Acistemp/*fits
    output: converted fits files in <data_dir2>/Compaciscent/*fits
    """
    outdir = data_dir2 + '/Compaciscent/'

    cmd = 'ls ' + data_dir + '/Acistemp/*fits* > ' + zspace
    os.system(cmd)
    fits_list = mcf.readFile(zspace)
    mcf.rm_file(zspace)

    for fits in fits_list:
#
#--- the msid is the leading part of the file name (e.g. <msid>_xxx.fits)
#
        atemp = re.split('\/', fits)
        fname = atemp[-1]
        btemp = re.split('_', fname)
        msid  = btemp[0]
        cols  = [msid] + bcols

        flist = pyfits.open(fits)
        fdata = flist[1].data
#
#--- convert each column from degK into degC
#
        for col in cols:
            odata      = fdata[col] - 273.15        #--- this is a numpy object
            fdata[col] = odata

        flist[1].data = fdata

        outfile = outdir + fname
        mcf.rm_file(outfile)
        flist.writeto(outfile)
#
#--- close the fits file; the original leaked one open handle per file
#
        flist.close()
def readBiasInfo(file):
    """
    reads bias related data and make a list of 13 lists
    Input:  file --- inputfile name
    Output: a list of 13 lists which contains:
            time, overclock, mode, ord_mode, outrow, num_row, sum2_2,
            deagain, biasalg, biasarg0, biasarg1, biasarg2, biasarg3
            returns 0 when no line could be parsed
    """
    data = mcf.readFile(file)

    time      = []
    overclock = []
    mode      = []
    ord_mode  = []
    outrow    = []
    num_row   = []
    sum2_2    = []
    deagain   = []
    biasalg   = []
    biasarg0  = []
    biasarg1  = []
    biasarg2  = []
    biasarg3  = []

    stest = 0
    for ent in data:
        try:
            atemp = re.split('\s+|\t+', ent)
#
#--- col 0 is seconds from the mission start; convert into DOM
#
            dom = (float(atemp[0]) - 48902399) / 86400
            dom = round(dom, 2)

            time.append(dom)
            overclock.append(float(atemp[1]))
            mode.append(atemp[2])
            ord_mode.append(atemp[3])
            outrow.append(int(atemp[4]))
            num_row.append(int(atemp[5]))
            sum2_2.append(int(atemp[6]))
            deagain.append(float(atemp[7]))
            biasalg.append(float(atemp[8]))
            biasarg0.append(float(atemp[9]))
            biasarg1.append(float(atemp[10]))
            biasarg2.append(float(atemp[11]))
            biasarg3.append(float(atemp[12]))
            stest += 1
#
#--- skip lines which are short or not in the expected numeric format
#
        except (ValueError, IndexError):
            pass

    if stest > 0:
        return [time, overclock, mode, ord_mode, outrow, num_row, sum2_2,\
                deagain, biasalg, biasarg0, biasarg1, biasarg2, biasarg3]
    else:
        return 0
def readBiasInfo(file):
    """
    read bias related data and return the columns as a list of lists
    Input:  file --- input file name
    Output: a list of lists:
            [time, overclock, mode, ord_mode, outrow, num_row, sum2_2,
             deagain, biasalg, biasarg0, biasarg1, biasarg2, biasarg3]
            or 0 when no line could be parsed
    """
    data = mcf.readFile(file)

    time      = []
    overclock = []
    mode      = []
    ord_mode  = []
    outrow    = []
    num_row   = []
    sum2_2    = []
    deagain   = []
    biasalg   = []
    biasarg0  = []
    biasarg1  = []
    biasarg2  = []
    biasarg3  = []

    good_cnt = 0
    for line in data:
        try:
            cols = re.split('\s+|\t+', line)
#
#--- col 0 is seconds in mission time; convert into DOM
#
            time.append(round((float(cols[0]) - 48902399) / 86400, 2))
            overclock.append(float(cols[1]))
            mode.append(cols[2])
            ord_mode.append(cols[3])
            outrow.append(int(cols[4]))
            num_row.append(int(cols[5]))
            sum2_2.append(int(cols[6]))
            deagain.append(float(cols[7]))
            biasalg.append(float(cols[8]))
            biasarg0.append(float(cols[9]))
            biasarg1.append(float(cols[10]))
            biasarg2.append(float(cols[11]))
            biasarg3.append(float(cols[12]))
            good_cnt += 1
        except:
            pass

    if good_cnt == 0:
        return 0

    return [time, overclock, mode, ord_mode, outrow, num_row, sum2_2,\
            deagain, biasalg, biasarg0, biasarg1, biasarg2, biasarg3]
def generate_all_plot():
    """
    a control function to create bias - overclock plots
    Input:  None, but read from:
                <data_dir>/Bias_save/CCD<ccd>/quad<quad>
    Output: <web_dir>/Plots/Sub2/bias_plot_ccd<ccd>_quad<quad>.png
    """
    for ccd in range(0, 10):
        for quad in range(0, 4):
#
#--- set input and output file names
#
            file    = data_dir + 'Bias_save/CCD' + str(ccd) + '/quad' + str(quad)
            outname = web_dir + 'Plots/Sub2/bias_plot_ccd' + str(ccd) + '_quad' + str(quad) + '.png'
#
#--- read data; x is time (DOM), y is <bias> - <overclock>
#
            data = mcf.readFile(file)
            x    = []
            y    = []
            for line in data:
                cols = re.split('\s+|\t+', line)
                try:
                    xval = (float(cols[0]) - 48902399.0) / 86400.0
                    yval = float(cols[1]) - float(cols[3])
                except:
                    continue
#
#--- if the difference is too large, drop it
#
                if abs(yval) > 10:
                    continue
                x.append(xval)
                y.append(yval)
#
#--- compute moving average and upper and lower envelopes
#--- moving interval: 50 days; 5th degree polynomial fit for smoothing
#
            moving_avg = fmv.find_moving_average(x, y, 50.0, 5)
#
#--- plot the data and fittings
#
            plot_bias_trend(x, y, moving_avg, outname, ccd, quad)
def readData(ccd, head):
    """
    read the bad-entry information file of one ccd and return four categorized lists
    Input:  ccd  --- ccd number
            head --- data type prefix of the file name (presumably ccd, hccd, or col
                     --- TODO confirm against callers)
            reads <data_dir>/Disp_dir/<head><ccd>_information
    Output: [warm, flick, new, past] --- four lists extracted by get_elm() from
            the first four lines of the information file
    """
    file = data_dir + 'Disp_dir/' + head + str(ccd) + '_information'
    data = mcf.readFile(file)

    warm  = get_elm(data[0])        #--- warm/hot entries
    flick = get_elm(data[1])        #--- flickering entries
    new   = get_elm(data[2])        #--- totally new entries
    past  = get_elm(data[3])        #--- all past and current entries

    return [warm, flick, new, past]
def cleanUp(cdir):
    """
    sort and remove duplicated lines in all files in given data directory
    Input:   cdir       --- directory name
    Output:  cdir/files ---- cleaned up files
    """
#
#--- nothing to do for an empty directory
#
    if os.listdir(cdir) == []:
        return

    cmd = 'ls ' + cdir + '/* > ' + zspace
    os.system(cmd)
    flist = mcf.readFile(zspace)
    mcf.rm_file(zspace)

    for fname in flist:
#
#--- skip any name containing a '.' (this avoids html and png files)
#
        if re.search('\.', fname) is None:
            mcf.removeDuplicate(fname, chk=1, dosort=1)
def update_bias_html():
    """
    update bias_home.html page
    Input:  None, but read from:
                <house_keeping>/bias_home.html
    Output: <web_dir>/bias_home.html
    """
#
#--- find today's date
#
    [year, mon, day, hours, min, sec, weekday, yday, dst] = tcnv.currentTime()

    lmon = str(mon)
    if mon < 10:
        lmon = '0' + lmon
    lday = str(day)
    if day < 10:
        lday = '0' + lday
#
#--- line to replace; fixed the original "Last Upate" typo so the page
#--- displays the keyword correctly
#
    newdate = "Last Update: " + lmon + '/' + lday + '/' + str(year)
#
#--- read the template
#
    line = house_keeping + 'bias_home.html'
    data = mcf.readFile(line)
#
#--- print out; replace the template's "Last Update" line with today's date
#
    outfile = web_dir + 'bias_home.html'
    fo = open(outfile, 'w')
    for ent in data:
        m = re.search('Last Update', ent)
        if m is not None:
            fo.write(newdate)
        else:
            fo.write(ent)
        fo.write('\n')
    fo.close()
def cleanUp(cdir):
    """
    sort and remove duplicated lines in all files in given data directory
    Input:   cdir       --- directory name
    Output:  cdir/files ---- cleaned up files
    """
    if os.listdir(cdir) != []:
        cmd = 'ls ' + cdir + '/* > ' + zspace
        os.system(cmd)
        data = mcf.readFile(zspace)
        mcf.rm_file(zspace)
#
#--- clean only extension-less files; this avoids html and png files
#
        plain = [ent for ent in data if re.search('\.', ent) is None]
        for ent in plain:
            mcf.removeDuplicate(ent, chk=1, dosort=1)
def plot_num_ccds(mdir, dataSets, col, yname, lbound, ubound):
    """
    creates history plots categorized by numbers of ccd used
    Input:  mdir     --- Output directory
            dataSets --- a list of multiple lists. each list contains category
                         data (except the first one is time)
            col      --- a position of data we want to use as a data
            yname    --- y axis label
            lbound   --- a lower boundary interval from the mean value of the data
            ubound   --- a upper boundary interval from the mean value of the data
            also need: <data_dir>/Info_dir/list_of_ccd_no
    Output: <mdir>/no_ccds.png
    """
    time    = dataSets[0]
    dataset = dataSets[col]
#
#---- read ccd information --- ccd information coming from a different file
#
    line = data_dir + 'Info_dir/list_of_ccd_no'
    data = mcf.readFile(line)

    ttime  = []
    ccd_no = []
    for ent in data:
        atemp = re.split('\s+|\t+', ent)
#
#--- convert time into DOM
#
        dom = (float(atemp[0]) - 48902399.0) / 86400.0
        dom = round(dom, 2)
        ttime.append(dom)
        ccd_no.append(int(atemp[1]))

    x1 = []
    y1 = []
    x2 = []
    y2 = []
    x3 = []
    y3 = []
#
#--- compare time stamps and if they are same, categorize the data
#
    for i in range(0, len(time)):
        for j in range(0, len(ttime)):
            if time[i] == ttime[j]:
                if ccd_no[j] == 6:
                    x1.append(time[i])
                    y1.append(dataset[i])
                elif ccd_no[j] == 5:
                    x2.append(time[i])
                    y2.append(dataset[i])
                else:
                    x3.append(time[i])
                    y3.append(dataset[i])
#
#--- stop after the first match; the old code used "continue" here, which
#--- kept scanning and appended the same point twice for duplicated stamps
#
                break

    xSets = [x1, x2, x3]
    ySets = [y1, y2, y3]
    yMinSets  = []
    yMaxSets  = []
    entLabels = ['# of CCDs = 6', '# of CCDs = 5', '# of CCDs : Others']
#
#--- set plotting range
#
    xmin = min(time)
    xmax = max(time)
    diff = xmax - xmin
    xmin = int(xmin - 0.05 * diff)
    if xmin < 0:
        xmin = 0
    xmax = int(xmax + 0.05 * diff)
#
#--- for y axis, the range is the mean of the data - lbound / + ubound
#
    asum = 0.0
    for ent in dataset:
        asum += float(ent)
    avg = asum / float(len(dataset))

    if lbound > 10:
        ymin = int(avg - lbound)
        ymax = int(avg + ubound)
    else:
        ymin = round(avg - lbound, 1)
        ymax = round(avg + ubound, 1)

    for i in range(0, 3):
        yMinSets.append(ymin)
        yMaxSets.append(ymax)

    xname = 'Time (DOM)'
#
#--- calling plotting routine
#
    pchk = plotPanel(xmin, xmax, yMinSets, yMaxSets, xSets, ySets, xname, yname, \
                     entLabels, mksize=1.0, lwidth=0.0)

    if pchk > 0:
        cmd = 'mv out.png ' + mdir + '/no_ccds.png'
        os.system(cmd)
def readBiasInfo2(ccd, quad, dataSets):
    """
    reads bias data and adds the list to category information
    Input:  ccd      --- CCD #
            quad     --- Quad #
            dataSets --- a list of data sets (lists) which contains category
                         data; the first list is the time stamps
            also need: <data_dir>/Bias_save/CCD<ccd>/quad<quad>
    Output: a list of the category lists above with one more list of
            <bias> - <overclock> appended at the last position
    """
    dlen = len(dataSets)
#
#--- get a list of time stamp from the dataSets
#
    ctime = dataSets[0]
#
#--- read the bias data
#
    line = data_dir + '/Bias_save/CCD' + str(ccd) + '/quad' + str(quad)
    data = mcf.readFile(line)
#
#--- one accumulator list per category; this replaces the old exec-string
#--- based setup (exec "elm%s = []") with plain data structures
#
    elms     = [[] for _ in range(13)]
    biasdata = []

    for ent in data:
        atemp = re.split('\s+|\t+', ent)
        try:
#
#--- convert time to DOM
#
            btime = float(atemp[0])
            dom   = round((btime - 48902399.0) / 86400.0, 2)
            diff  = float(atemp[1]) - float(atemp[3])
#
#--- there are some bad data; ignore them
#
            if abs(diff) > 5:
                continue
#
#--- match the time in two data sets
#
            for i in range(0, len(ctime)):
                if dom < int(ctime[i]):
                    break
                elif int(dom) == int(ctime[i]):
#
#--- if the time stamps match, save all category data
#
                    biasdata.append(diff)
                    for j in range(0, dlen):
                        val = dataSets[j][i]
                        if isinstance(val, (long, int)):
                            elms[j].append(int(val))
                        elif isinstance(val, float):
                            elms[j].append(float(val))
                        else:
                            elms[j].append(str(val))
                    break
        except:
            pass
#
#--- create a list of the 13 category lists plus the bias data
#
    biasSets = [elms[i] for i in range(13)]
    biasSets.append(biasdata)

    return biasSets
def extract_bias_data(today_data, comp_test=''):
    """
    extract bias data using a given data list
    Input:  today_data --- a list of data fits files
            comp_test  --- if 'test', test will be run
            also need: <house_keeping>/Defect/bad_col_list --- a list of known bad columns
    Output: <data_dir>/Bias_save/CCD<ccd>/quad<quad>  see more in write_bias_data()
            <data_dir>/Info_dir/CCD<ccd>/quad<quad>   see more in printBiasInfo()
            in test mode, returns result_list of the first valid file instead
    """
#
#--- read the known bad column list once; its content does not depend on the
#--- input file, so there is no need to re-read it for every fits file
#
    line     = house_keeping + 'Defect/bad_col_list'
    bad_data = mcf.readFile(line)

    stime_list = []
    for dfile in today_data:
#
#--- check whether file exists
#
        chk = mcf.chkFile(dfile)
        if chk == 0:
            continue
#
#--- extract time stamp
#
        stime = bcf.extractTimePart(dfile)
        if stime < 0:
            continue
#
#--- extract CCD information; only TIMED readmode data are used
#
        [ccd_id, readmode, date_obs, overclock_a, overclock_b, overclock_c, overclock_d] \
                        = bcf.extractCCDInfo(dfile)
        if readmode != 'TIMED':
            continue
#
#--- sort the known bad columns of this CCD into the four quads
#--- (each quad covers 256 columns; values are shifted into quad coordinates)
#
        bad_col0 = []
        bad_col1 = []
        bad_col2 = []
        bad_col3 = []
        for ent in bad_data:
#
#--- skip none data line
#
            m = re.search('#', ent)
            if m is not None:
                continue

            atemp = re.split(':', ent)
            dccd  = int(atemp[0])
            if dccd == ccd_id:
                val = int(atemp[1])
                if val <= 256:
                    bad_col0.append(val)
                elif val <= 512:
                    val -= 256
                    bad_col1.append(val)
                elif val <= 768:
                    val -= 512
                    bad_col2.append(val)
                elif val <= 1024:
                    val -= 768
                    bad_col3.append(val)
#
#--- trim the data at the threshold = 4000
#
        f = pyfits.open(dfile)
        sdata = f[0].data
        sdata[sdata < 0] = 0
        sdata[sdata > 4000] = 0
        f.close()
#
#--- compute and write out bias data
#
        result_list = bcf.extractBiasInfo(dfile)

        if comp_test == 'test':
            return result_list
        else:
            [fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, \
             overclock_a, overclock_b, overclock_c, overclock_d] = result_list

            write_bias_data(sdata, ccd_id, 0, overclock_a, stime, bad_col0)
            write_bias_data(sdata, ccd_id, 1, overclock_b, stime, bad_col1)
            write_bias_data(sdata, ccd_id, 2, overclock_c, stime, bad_col2)
            write_bias_data(sdata, ccd_id, 3, overclock_d, stime, bad_col3)
#
#---- more bias info
#
            printBiasInfo(ccd_id, 0, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_a)
            printBiasInfo(ccd_id, 1, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_b)
            printBiasInfo(ccd_id, 2, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_c)
            printBiasInfo(ccd_id, 3, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_d)

            stime_list.append(stime)
#
#--- now count how many CCDs are used for a particular observations and write out to list_of_ccd_no
#
    countObservation(stime_list)
def create_history_file(head):
    """
    create count history file and the information file containing current bad entry information
    Input:  head --- ccd, hccd, or col to indicate which data to handle
    Output: <head>_ccd<ccd>_cnt --- count history data:
                <dom><><year:ydate><><cumlative cnt><><cnt for the day>
            <head>_ccd<ccd>_information --- current information of the bad entries.
                For example, list warm pixels, flickering pixels, totally new
                pixels, and all past and current warm pixels.
    """
    for ccd in range(0, 10):
#
#--- read data file; head is either ccd, hccd, or col
#
        file = data_dir + 'Disp_dir/hist_' + head + str(ccd)
        data = mcf.readFile(file)

        bad_dat_list = []           #--- save all bad data as elements
        bad_dat_save = []           #--- save all bad data as a list for each day
        dom          = []
        ydate        = []
        dcnt         = []           #--- keep discreate count history
        ccnt         = []           #--- keep cumulative count history
        new          = []           #--- keep totally new bad entries in the last 5 days
        pcnt         = 0
        k            = 0
        tot          = len(data)

        for ent in data:
#
#--- read only data entries written in a correct format: <dom><><year>:<ydate><>:<bad_data>...
#
            atemp = re.split('<>', ent)
            chk1  = mcf.chkNumeric(atemp[0])
            btemp = re.split(':', atemp[1])
            chk2  = mcf.chkNumeric(btemp[1])

            if (chk1 == True) and (int(atemp[0]) > 0) and (chk2 == True) and (int(btemp[1]) > 0):
                dom.append(atemp[0])
                ydate.append(atemp[1])
#
#--- check the bad data is recorded for the given day
#
                if head == 'ccd' or head == 'hccd':
#
#--- a '(' in the field marks a recorded bad entry for pixel data
#
                    m1 = re.search('\(', atemp[2])
                else:
#
#--- case for warm columns: the last token must be numeric
#
                    btemp = re.split(':', atemp[2])
                    if mcf.chkNumeric(btemp[len(btemp) - 1]):
                        m1 = 'OK'
                    else:
                        m1 = None

                if m1 is not None:
                    btemp = re.split(':', atemp[2])
                    if btemp != '':
                        dcnt.append(len(btemp))
#
#--- for the last five days, check whether there are any totally new bad entries exists
#
                        if k > tot - 5:
                            for test in btemp:
                                chk = 0
                                for comp in bad_dat_list:
                                    if test == comp:
                                        chk = 1
                                        continue
                                if chk == 0:
                                    new.append(test)
#
#--- cumulative count is the number of distinct bad entries seen so far
#
                        bad_dat_list = bad_dat_list + btemp
                        out  = list(set(bad_dat_list))
                        pcnt = len(out)
                        ccnt.append(pcnt)
                        bad_dat_save.append(btemp)
                else:
#
#--- no bad entry for the day; carry the last cumulative count forward
#
                    dcnt.append(0)
                    bad_dat_save.append([])
                    ccnt.append(pcnt)

            k += 1              #--- k is inlimented to check the last 5 days
#
#--- find out which entries are warm/hot and flickering
#
        [warm, flick, b_list, p_list] = find_warm_and_flickering(bad_dat_save)
#
#--- open output file to print current information
#
        line = data_dir + '/Disp_dir/' + head + str(ccd) + '_information'
        fo   = open(line, 'w')

        fo.write("warm:\t")
        print_data(fo, warm)

        fo.write('flick:\t')
        print_data(fo, flick)

        fo.write('new:\t')
        out = list(set(new))
        print_data(fo, out)

        fo.write('past:\t')
        out = list(set(bad_dat_list))
        print_data(fo, out)

        fo.close()
#
#--- open output file to print out count history
#--- NOTE(review): b_list/p_list appear to start 13 entries after the head of
#--- the history --- confirm against find_warm_and_flickering()
#
        ofile = data_dir + 'Disp_dir/' + head + str(ccd) + '_cnt'
        fo    = open(ofile, 'w')

        for i in range(0, len(dom)):
            if i < 13:
                line = dom[i] + '<>' + ydate[i] + '<>' + str(ccnt[i]) + '<>' + str(dcnt[i]) + '<>0<>0\n'
            else:
                line = dom[i] + '<>' + ydate[i] + '<>' + str(ccnt[i]) + '<>' + str(dcnt[i]) \
                        + '<>' + str(b_list[i - 13]) + '<>' + str(p_list[i - 13]) + '\n'
            fo.write(line)

        fo.close()
def get_data_list(comp_test=''):
    """
    compare the current input list to the old one and select out the data which are not used
    Input:  comp_test --- if it is "test" the test data is used
            house_keeping/old_file_list --- previous data list
            house_keeping/bad_fits_file --- bad fits data list
            the data are read from /dsops/ap/sdp/cache/*/acis/*evt1.fits (if it is an actual run)
    Output: input_data --- the data list
    """
#
#--- create a current file list
#
    if comp_test == 'test':
        cmd = 'ls -d /data/mta/Script/ACIS/Count_rate/house_keeping/Test_data_save/ACIS_rad_data/*evt1.fits > '
    else:
        cmd = 'ls -d /dsops/ap/sdp/cache/*/acis/*evt1.fits > '
    cmd = cmd + zspace
    os.system(cmd)

    f    = open(zspace, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    mcf.rm_file(zspace)
#
#--- choose files with only non-calibration data (obsid < 50000)
#
    file_list = []
    for ent in data:
        atemp = re.split('acisf', ent)
        btemp = re.split('_', atemp[1])
        try:
            mark = int(float(btemp[0]))     #---- for the new format : acisf16218_000N001_evt1.fits
        except:
            ctemp = re.split('N', btemp[1]) #---- for the older format : acisf16218N001_evt1.fits
            mark  = int(ctemp[0])
        if mark < 50000:
            file_list.append(ent)
#
#--- read the old file list
#
    file     = house_keeping + 'old_file_list'
    old_list = mcf.readFile(file)
#
#--- read bad fits file list
#
    try:
        file2    = house_keeping + 'bad_fits_file'
        bad_list = mcf.readFile(file2)
    except:
        bad_list = []
#
#--- update old_file_list while selecting out names which are neither in the
#--- old list nor in the bad fits list
#
    input_data = []
    f = open(file, 'w')
    for ent in file_list:
        f.write(ent)
        f.write('\n')
        if (ent not in old_list) and (ent not in bad_list):
            input_data.append(ent)
    f.close()

    return input_data
def readData(dataname):
    """
    read a count history data file and set plotting ranges
    Input:  dataname --- data file name (need a full path to the file)
    Output: a list of:
            xmin_list / xmax_list / ymin_list / ymax_list --- plotting ranges
            x_list / y_list --- the data; y order is [cumulative, daily,
                                bad point, potential bad point] counts
    """
    data = mcf.readFile(dataname)

    xval = []
    cval = []
    dval = []
    bval = []
    pval = []
    prev = 0                #--- NOTE(review): prev is never updated, so only dom == 0 is skipped --- confirm intent
    for ent in data:
        atemp = re.split('<>', ent)
        try:
            val = float(atemp[0])
            if val < 0:
                continue
            if val == prev:
                continue
            xval.append(int(val))
            val1 = float(atemp[2])
            val2 = float(atemp[3])
            val3 = float(atemp[4])
            val4 = float(atemp[5])
            cval.append(val1)
            dval.append(val2)
            bval.append(val3)
            pval.append(val3 + val4)        #--- potential counts include the bad counts
        except:
            pass
#
#-- find plotting ranges and make a list of data lists; one loop replaces the
#-- four copy-pasted stanzas of the original
#
    xmin_list = []
    xmax_list = []
    ymin_list = []
    ymax_list = []
    x_list    = []
    y_list    = []
    for yval in [cval, dval, bval, pval]:
        (xmin, xmax, ymin, ymax) = findPlottingRange(xval, yval)
        xmin_list.append(xmin)
        xmax_list.append(xmax)
        ymin_list.append(ymin)
        ymax_list.append(ymax)
        x_list.append(xval)
        y_list.append(yval)

    return [xmin_list, xmax_list, ymin_list, ymax_list, x_list, y_list]
def extract_bias_data(today_data, comp_test=''):
    """
    extract bias data using a given data list
    Input:  today_data --- a list of data fits files
            comp_test  --- if 'test', test will be run
            also need: <house_keeping>/Defect/bad_col_list --- a list of known bad columns
    Output: <data_dir>/Bias_save/CCD<ccd>/quad<quad>  see more in write_bias_data()
            <data_dir>/Info_dir/CCD<ccd>/quad<quad>   see more in printBiasInfo()
            in test mode, returns result_list of the first valid file instead
    """
    stime_list = []
    for dfile in today_data:
#
#--- check whether file exists
#
        chk = mcf.chkFile(dfile)
        if chk == 0:
            continue
#
#--- extract time stamp
#
        stime = bcf.extractTimePart(dfile)
        if stime < 0:
            continue
#
#--- extract CCD information; only TIMED readmode data are processed
#
        [ccd_id, readmode, date_obs, overclock_a, overclock_b, overclock_c, overclock_d] \
                        = bcf.extractCCDInfo(dfile)
        if readmode != 'TIMED':
            continue
#
#--- sort the known bad columns of this CCD into the four quads
#--- (each quad covers 256 columns; values are shifted into quad coordinates)
#
        bad_col0 = []
        bad_col1 = []
        bad_col2 = []
        bad_col3 = []

        line = house_keeping + 'Defect/bad_col_list'
        data = mcf.readFile(line)
        for ent in data:
#
#--- skip none data line
#
            m = re.search('#', ent)
            if m is not None:
                continue

            atemp = re.split(':', ent)
            dccd  = int(atemp[0])
            if dccd == ccd_id:
                val = int(atemp[1])
                if val <= 256:
                    bad_col0.append(val)
                elif val <= 512:
                    val -= 256
                    bad_col1.append(val)
                elif val <= 768:
                    val -= 512
                    bad_col2.append(val)
                elif val <= 1024:
                    val -= 768
                    bad_col3.append(val)
#
#--- trim the data at the threshold = 4000
#
        f = pyfits.open(dfile)
        sdata = f[0].data
        sdata[sdata < 0] = 0
        sdata[sdata > 4000] = 0
        f.close()
#
#--- compute and write out bias data
#
        result_list = bcf.extractBiasInfo(dfile)

        if comp_test == 'test':
            return result_list
        else:
            [fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, \
             overclock_a, overclock_b, overclock_c, overclock_d] = result_list

            write_bias_data(sdata, ccd_id, 0, overclock_a, stime, bad_col0)
            write_bias_data(sdata, ccd_id, 1, overclock_b, stime, bad_col1)
            write_bias_data(sdata, ccd_id, 2, overclock_c, stime, bad_col2)
            write_bias_data(sdata, ccd_id, 3, overclock_d, stime, bad_col3)
#
#---- more bias info
#
            printBiasInfo(ccd_id, 0, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_a)
            printBiasInfo(ccd_id, 1, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_b)
            printBiasInfo(ccd_id, 2, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_c)
            printBiasInfo(ccd_id, 3, stime, fep, dmode, srow, rowcnt, orcmode, dgain, biasalg, barg0, barg1, barg2, barg3, overclock_d)

            stime_list.append(stime)
#
#--- now count how many CCDs are used for a particular observations and write out to list_of_ccd_no
#
    countObservation(stime_list)
def find_today_data(comp_test=''):
    """
    find which data to use for the data analysis
    Input:  comp_test --- if it is 'test', read test data
            data are also read from:
                <house_keeping>/past_input_data
                /dsops/ap/sdp/cache/*/acis/*bias0.fits
    Output: today_data --- a list of fits files to be used
    """
    if comp_test == "test":
#
#--- test case
#
        cmd = 'ls /data/mta/Script/ACIS/Bad_pixels/house_keeping/Test_data_save/Test_data/* >' + zspace
        os.system(cmd)
        today_data = mcf.readFile(zspace)
        mcf.rm_file(zspace)
    else:
#
#--- normal case: read the previously processed list
#
        file  = house_keeping + 'past_input_data'
        data1 = mcf.readFile(file)
#
#--- the cut date comes from the date encoded in the last processed file name
#
        try:
            atemp    = re.split('\/', data1[-1])
            btemp    = re.split('_', atemp[5])
            cut_date = int(btemp[0] + btemp[1] + btemp[2])
        except:
            cut_date = 0
#
#--- keep a backup of the old list
#
        file2 = house_keeping + 'past_input_data~'
        cmd   = 'mv ' + file + ' ' + file2
        os.system(cmd)
#
#--- read the current data list
#
        cmd = 'ls /dsops/ap/sdp/cache/*/acis/*bias0.fits >' + zspace
        os.system(cmd)
        f     = open(zspace, 'r')
        data2 = [line.strip() for line in f.readlines()]
        f.close()
        mcf.rm_file(zspace)
#
#--- save the current list and keep only files newer than the cut date
#
        today_data = []
        fo = open(file, 'w')
        for ent in data2:
            fo.write(ent)
            fo.write('\n')

            if ent in data1:
                continue

            atemp = re.split('\/', ent)
            btemp = re.split('_', atemp[5])
            date  = btemp[0] + btemp[1] + btemp[2]
            if int(date) > cut_date:
                today_data.append(ent)
        fo.close()

    return today_data
def readBiasInfo2(ccd, quad, dataSets):
    """
    reads bias data and adds the list to category information
    Input:  ccd      --- CCD #
            quad     --- Quad #
            dataSets --- a list of data sets (lists) which contains category
                         data; the first list is the time stamps
            also need: <data_dir>/Bias_save/CCD<ccd>/quad<quad>
    Output: a list of the category lists above with one more list of
            <bias> - <overclock> appended at the last position
    """
    dlen = len(dataSets)
#
#--- get a list of time stamp from the dataSets
#
    ctime = dataSets[0]
#
#--- read the bias data
#
    line = data_dir + '/Bias_save/CCD' + str(ccd) + '/quad' + str(quad)
    data = mcf.readFile(line)
#
#--- plain per-category accumulators; replaces the old exec "elm%s = []" setup
#
    acc      = [[] for _ in range(13)]
    biasdata = []
#
#--- start checking bias data
#
    for ent in data:
        atemp = re.split('\s+|\t+', ent)
        try:
#
#--- convert time to DOM
#
            btime = float(atemp[0])
            dom   = round((btime - 48902399.0) / 86400.0, 2)
            diff  = float(atemp[1]) - float(atemp[3])
#
#--- there are some bad data; ignore them
#
            if abs(diff) > 5:
                continue
#
#--- match the time in two data sets; the category lists are assumed sorted
#--- in time so the scan stops once dom is passed
#
            for i in range(0, len(ctime)):
                if dom < int(ctime[i]):
                    break
                elif int(dom) == int(ctime[i]):
#
#--- if the time stamps match, save all category data keeping its type
#
                    biasdata.append(diff)
                    for j in range(0, dlen):
                        val = dataSets[j][i]
                        if isinstance(val, (long, int)):
                            acc[j].append(int(val))
                        elif isinstance(val, float):
                            acc[j].append(float(val))
                        else:
                            acc[j].append(str(val))
                    break
        except:
            pass
#
#--- create a list of the 13 category lists plus the bias data
#
    biasSets = [acc[i] for i in range(13)]
    biasSets.append(biasdata)

    return biasSets
def plot_num_ccds(mdir, dataSets, col, yname, lbound, ubound):
    """
    creates history plots categorized by numbers of ccd used
    Input:  mdir     --- Output directory
            dataSets --- a list of multiple lists. each list contains category
                         data (except the first one is time)
            col      --- a position of data we want to use as a data
            yname    --- y axis label
            lbound   --- a lower boundary interval from the mean value of the data
            ubound   --- a upper boundary interval from the mean value of the data
            also need: <data_dir>/Info_dir/list_of_ccd_no
    Output: <mdir>/no_ccds.png
    """
    time    = dataSets[0]
    dataset = dataSets[col]
#
#---- read ccd information --- ccd information coming from a different file
#
    line = data_dir + 'Info_dir/list_of_ccd_no'
    data = mcf.readFile(line)

    ttime  = []
    ccd_no = []
    for ent in data:
        atemp = re.split('\s+|\t+', ent)
        dom   = round((float(atemp[0]) - 48902399.0) / 86400.0, 2)
        ttime.append(dom)
        ccd_no.append(int(atemp[1]))

    x1 = []
    y1 = []
    x2 = []
    y2 = []
    x3 = []
    y3 = []
#
#--- compare time stamps and if they are same, categorize the data by the
#--- number of CCDs in use; stop at the first match (the original "continue"
#--- kept scanning and could double-append on duplicated time stamps)
#
    for i in range(0, len(time)):
        for j in range(0, len(ttime)):
            if time[i] == ttime[j]:
                if ccd_no[j] == 6:
                    x1.append(time[i])
                    y1.append(dataset[i])
                elif ccd_no[j] == 5:
                    x2.append(time[i])
                    y2.append(dataset[i])
                else:
                    x3.append(time[i])
                    y3.append(dataset[i])
                break

    xSets = [x1, x2, x3]
    ySets = [y1, y2, y3]
    yMinSets  = []
    yMaxSets  = []
    entLabels = ['# of CCDs = 6', '# of CCDs = 5', '# of CCDs : Others']
#
#--- set plotting range
#
    xmin = min(time)
    xmax = max(time)
    diff = xmax - xmin
    xmin = int(xmin - 0.05 * diff)
    if xmin < 0:
        xmin = 0
    xmax = int(xmax + 0.05 * diff)
#
#--- for y axis, the range is the mean of the data - lbound / + ubound
#
    asum = 0.0
    for ent in dataset:
        asum += float(ent)
    avg = asum / float(len(dataset))

    if lbound > 10:
        ymin = int(avg - lbound)
        ymax = int(avg + ubound)
    else:
        ymin = round(avg - lbound, 1)
        ymax = round(avg + ubound, 1)

    for i in range(0, 3):
        yMinSets.append(ymin)
        yMaxSets.append(ymax)

    xname = 'Time (DOM)'
#
#--- calling plotting routine
#
    pchk = plotPanel(xmin, xmax, yMinSets, yMaxSets, xSets, ySets, xname, yname, \
                     entLabels, mksize=1.0, lwidth=0.0)

    if pchk > 0:
        cmd = 'mv out.png ' + mdir + '/no_ccds.png'
        os.system(cmd)
def create_history_file(head):
    """
    create count history file and the information file containing current bad entry information
    Input:  head --- ccd, hccd, or col to indicate which data to handle
    Output: <head>_ccd<ccd>_cnt --- count history data:
                <dom><><year:ydate><><cumlative cnt><><cnt for the day>
            <head>_ccd<ccd>_information --- current information of the bad entries.
                For example, list warm pixels, flickering pixels, totally new
                pixels, and all past and current warm pixels.
    """
    for ccd in range(0, 10):
#
#--- read data file; head is either ccd, hccd, or col
#
        file = data_dir + 'Disp_dir/hist_' + head + str(ccd)
        data = mcf.readFile(file)

        bad_dat_list = []           #--- save all bad data as elements
        bad_dat_save = []           #--- save all bad data as a list for each day
        dom          = []
        ydate        = []
        dcnt         = []           #--- keep discreate count history
        ccnt         = []           #--- keep cumulative count history
        new          = []           #--- keep totally new bad entries in the last 5 days
        pcnt         = 0
        k            = 0
        tot          = len(data)

        for ent in data:
#
#--- read only data entries written in a correct format: <dom><><year>:<ydate><>:<bad_data>...
#
            atemp = re.split('<>', ent)
            chk1  = mcf.chkNumeric(atemp[0])
            btemp = re.split(':', atemp[1])
            chk2  = mcf.chkNumeric(btemp[1])

            if (chk1 == True) and (int(atemp[0]) > 0) and (chk2 == True) and (int(btemp[1]) > 0):
                dom.append(atemp[0])
                ydate.append(atemp[1])
#
#--- check the bad data is recorded for the given day; for pixel data a '('
#--- marks an entry, for warm columns the last token must be numeric
#
                if head == 'ccd' or head == 'hccd':
                    m1 = re.search('\(', atemp[2])
                else:
                    btemp = re.split(':', atemp[2])
                    if mcf.chkNumeric(btemp[len(btemp) - 1]):
                        m1 = 'OK'
                    else:
                        m1 = None

                if m1 is not None:
                    btemp = re.split(':', atemp[2])
                    if btemp != '':
                        dcnt.append(len(btemp))
#
#--- for the last five days, check whether there are any totally new bad entries exists
#
                        if k > tot - 5:
                            for test in btemp:
                                chk = 0
                                for comp in bad_dat_list:
                                    if test == comp:
                                        chk = 1
                                        continue
                                if chk == 0:
                                    new.append(test)
#
#--- cumulative count is the number of distinct bad entries seen so far
#
                        bad_dat_list = bad_dat_list + btemp
                        out  = list(set(bad_dat_list))
                        pcnt = len(out)
                        ccnt.append(pcnt)
                        bad_dat_save.append(btemp)
                else:
#
#--- no bad entry for the day; carry the last cumulative count forward
#
                    dcnt.append(0)
                    bad_dat_save.append([])
                    ccnt.append(pcnt)

            k += 1              #--- k is inlimented to check the last 5 days
#
#--- find out which entries are warm/hot and flickering
#
        [warm, flick, b_list, p_list] = find_warm_and_flickering(bad_dat_save)
#
#--- open output file to print current information
#
        line = data_dir + '/Disp_dir/' + head + str(ccd) + '_information'
        fo   = open(line, 'w')

        fo.write("warm:\t")
        print_data(fo, warm)

        fo.write('flick:\t')
        print_data(fo, flick)

        fo.write('new:\t')
        out = list(set(new))
        print_data(fo, out)

        fo.write('past:\t')
        out = list(set(bad_dat_list))
        print_data(fo, out)

        fo.close()
#
#--- open output file to print out count history
#--- NOTE(review): b_list/p_list appear to start 13 entries after the head of
#--- the history --- confirm against find_warm_and_flickering()
#
        ofile = data_dir + 'Disp_dir/' + head + str(ccd) + '_cnt'
        fo    = open(ofile, 'w')

        for i in range(0, len(dom)):
            if i < 13:
                line = dom[i] + '<>' + ydate[i] + '<>' + str(ccnt[i]) + '<>' + str(dcnt[i]) + '<>0<>0\n'
            else:
                line = dom[i] + '<>' + ydate[i] + '<>' + str(ccnt[i]) + '<>' + str(dcnt[i]) \
                        + '<>' + str(b_list[i - 13]) + '<>' + str(p_list[i - 13]) + '\n'
            fo.write(line)

        fo.close()
def find_today_data(comp_test=''):
    """
    find which data to use for the data analysis
    Input:  comp_test --- if it is 'test', read test data
            data are also read from:
                <house_keeping>/past_input_data
                /dsops/ap/sdp/cache/*/acis/*bias0.fits
    Output: today_data --- a list of fits files to be used
    """
    if comp_test == "test":
#
#--- test case
#
        cmd = 'ls /data/mta/Script/ACIS/Bad_pixels/house_keeping/Test_data_save/Test_data/* >' + zspace
        os.system(cmd)
        today_data = mcf.readFile(zspace)
        mcf.rm_file(zspace)

        return today_data
#
#--- normal case: read the previously processed list
#
    file  = house_keeping + 'past_input_data'
    data1 = mcf.readFile(file)
#
#--- the cut date is encoded in the name of the last file processed before
#
    try:
        parts    = re.split('\/', data1[len(data1) - 1])
        dparts   = re.split('_', parts[5])
        cut_date = int(dparts[0] + dparts[1] + dparts[2])
    except:
        cut_date = 0
#
#--- back up the previous input list
#
    file2 = house_keeping + 'past_input_data~'
    cmd   = 'mv ' + file + ' ' + file2
    os.system(cmd)
#
#--- read the current data list
#
    cmd = 'ls /dsops/ap/sdp/cache/*/acis/*bias0.fits >' + zspace
    os.system(cmd)
    f     = open(zspace, 'r')
    data2 = [line.strip() for line in f.readlines()]
    f.close()
    mcf.rm_file(zspace)
#
#--- rewrite the input list and keep entries which were not seen before and
#--- are newer than the cut date
#
    today_data = []
    fo = open(file, 'w')
    for ent in data2:
        fo.write(ent)
        fo.write('\n')

        seen = 0
        for comp in data1:
            if ent == comp:
                seen = 1
                break

        if seen == 0:
            parts  = re.split('\/', ent)
            dparts = re.split('_', parts[5])
            date   = dparts[0] + dparts[1] + dparts[2]
            if int(date) > cut_date:
                today_data.append(ent)
    fo.close()

    return today_data
def get_data_list(comp_test=''):
    """
    compare the current input list to the old one and select out the data which are not used
    Input:  comp_test --- if it is "test" the test data is used
            house_keeping/old_file_list --- previous data list
            house_keeping/bad_fits_file --- bad fits data list
            the data are read from /dsops/ap/sdp/cache/*/acis/*evt1.fits (if it is an actual run)
    Output: input_data --- the data list
    """
#
#--- create a current file list
#
    if comp_test == 'test':
        cmd = 'ls -d /data/mta/Script/ACIS/Count_rate/house_keeping/Test_data_save/ACIS_rad_data/*evt1.fits > '
    else:
        cmd = 'ls -d /dsops/ap/sdp/cache/*/acis/*evt1.fits > '
    cmd = cmd + zspace
    os.system(cmd)

    f    = open(zspace, 'r')
    data = [line.strip() for line in f.readlines()]
    f.close()
    mcf.rm_file(zspace)
#
#--- choose files with only non-calibration data (obsid < 50000)
#
    file_list = []
    for ent in data:
        atemp = re.split('acisf', ent)
        btemp = re.split('_', atemp[1])
        try:
#
#--- for the new format : acisf16218_000N001_evt1.fits --- the obsid is the
#--- first token; the old code read the part after 'N' of the second token,
#--- which mis-classified new-format names (and raised on old-format ones)
#
            val  = float(btemp[0])
            mark = int(val)
        except:
#
#--- for the older format : acisf16218N001_evt1.fits
#
            ctemp = re.split('N', btemp[1])
            mark  = int(ctemp[0])

        if mark < 50000:
            file_list.append(ent)
#
#--- read the old file list
#
    file     = house_keeping + 'old_file_list'
    old_list = mcf.readFile(file)
#
#--- read bad fits file list
#
    try:
        file2    = house_keeping + 'bad_fits_file'
        bad_list = mcf.readFile(file2)
    except:
        bad_list = []
#
#--- update old_file_list while reading out new files
#
    f = open(file, 'w')
#
#--- compare two files and select out new file names
#
    input_data = []
    for ent in file_list:
        f.write(ent)
        f.write('\n')

        chk = 1
        for comp in old_list:
            if ent == comp:
                chk = 0
                break
        if chk == 1:
            chk2 = 1
            for bad in bad_list:
                if ent == bad:
                    chk2 = 0
                    break
            if chk2 == 1:
                input_data.append(ent)
    f.close()

    return input_data