def get_calfac(t=None):
    ''' Read total power and auto-correlation calibration factors from the SQL
        database, for the time specified by Time() object t, or if None, at the
        next earlier calibration time to the current time.

        Returns a dict with keys 'fghz', 'timestamp', 'tpcalfac', 'accalfac',
        'tpoffsun' and 'acoffsun', where the four factor arrays have shape
        (13, 2, nf) = (nant, npol, nfreq).
    '''
    tpcal_type = 10  # Calibration type specified in cal_header.py
    if t is None:
        t = Time.now()
    xml, buf = ch.read_cal(tpcal_type, t=t)
    fghz = stateframe.extract(buf, xml['FGHz'])
    nf = len(fghz)
    # Use the builtin float dtype: np.float was a deprecated alias for it and
    # has been removed in recent numpy versions.
    tpcalfac = np.zeros((13, 2, nf), float)
    tpoffsun = np.zeros((13, 2, nf), float)
    accalfac = np.zeros((13, 2, nf), float)
    acoffsun = np.zeros((13, 2, nf), float)
    nant = len(xml['Antenna'])
    for i in range(nant):
        # Antnum in the SQL record is 1-based; convert to 0-based index
        iant = stateframe.extract(buf, xml['Antenna'][i]['Antnum']) - 1
        tpcalfac[iant] = stateframe.extract(buf, xml['Antenna'][i]['TPCalfac'])
        accalfac[iant] = stateframe.extract(buf, xml['Antenna'][i]['ACCalfac'])
        tpoffsun[iant] = stateframe.extract(buf, xml['Antenna'][i]['TPOffsun'])
        acoffsun[iant] = stateframe.extract(buf, xml['Antenna'][i]['ACOffsun'])
    return {'fghz': fghz,
            'timestamp': stateframe.extract(buf, xml['Timestamp']),
            'tpcalfac': tpcalfac, 'accalfac': accalfac,
            'tpoffsun': tpoffsun, 'acoffsun': acoffsun}
def get_gain_corr(trange, tref=None, fghz=None):
    ''' Calls get_gain_state() for a timerange and a reference time, and
        returns the gain difference table to apply to data in the given
        timerange.  If no reference time is provided, the gain state is
        referred to the nearest earlier REFCAL.

        Returns a dictionary containing:
          antgain    Array of size (15, 2, 34, nt) = (nant, npol, nbands, nt)
          times      A Time() object corresponding to the times in antgain
    '''
    if tref is None:
        # No reference time given: look up the nearest earlier REFCAL record
        xml, buf = ch.read_cal(8, t=trange[0])
        tref = Time(stf.extract(buf, xml['Timestamp']), format='lv')
    # Gain state at the reference time (median over a ~1-minute window)
    ref_window = Time([tref.iso, Time(tref.lv + 61, format='lv').iso])
    refgain = get_gain_state(ref_window)  # refcal gain state for 60 s
    # Collapse the time axis of the refcal gain state (should be constant anyway)
    for key in ('h1', 'h2', 'v1', 'v2'):
        refgain[key] = np.median(refgain[key], 1)
    # Gain state over the requested timerange
    srcgain = get_gain_state(trange)  # solar gain state for timerange of file
    ntimes = len(srcgain['times'])
    # Antenna-based gains vs. band
    antgain = np.zeros((15, 2, 34, ntimes), np.float32)
    for ant in range(15):
        # Attenuator differences are band-independent, so compute them once
        hdif = srcgain['h1'][ant] + srcgain['h2'][ant] - refgain['h1'][ant] - refgain['h2'][ant]
        vdif = srcgain['v1'][ant] + srcgain['v2'][ant] - refgain['v1'][ant] - refgain['v2'][ant]
        for bnd in range(34):
            antgain[ant, 0, bnd] = hdif + srcgain['dcmattn'][ant, 0, bnd] - refgain['dcmattn'][ant, 0, bnd]
            antgain[ant, 1, bnd] = vdif + srcgain['dcmattn'][ant, 1, bnd] - refgain['dcmattn'][ant, 1, bnd]
    return {'antgain': antgain, 'times': srcgain['times']}
def sql2rstn(t=None):
    """This function extracts the RSTN data from SQL with SQL_timestamp 0300
    on the date supplied.  If the values could be extracted then the data is
    returned in a list as follows:
        0 - timestamp: Astropy Time which is the date on which the data was
            collected.  This should match dt
        1 - freq: A float32 numpy array containing the 9 frequencies in GHz
        2 - data: The flux data which is a 9x7 int16 numpy array.
    SQL_timestamp is also returned."""
    if t is None:
        t = Time.now()
    # SQL timestamp is 0300 UT on the requested date (floor of mjd + 0.125 day)
    sqlt = Time(np.floor(t.mjd) + 0.125, format='mjd')
    xml, buf = ch.read_cal(12, sqlt)
    if buf is None:
        return None, None
    sqlt_read = Time(extract(buf, xml['SQL_timestamp']), format='lv')
    #if np.floor(sqlt.mjd) != np.floor(sqlt_read.mjd): return None, None
    data = [Time(extract(buf, xml['Timestamp']), format='lv'),
            extract(buf, xml['FGHz']),
            extract(buf, xml['Flux'])]
    return data, sqlt_read
def sql2refcal(t, lohi=False):
    '''Supply a timestamp in Time format, return the closest refcal data'''
    import cal_header as ch
    import stateframe as stf
    # Calibration type 12 is used for the lo-hi case, otherwise type 8
    caltype = 12 if lohi else 8
    xml, buf = ch.read_cal(caltype, t=t)
    # Reassemble the complex reference-cal visibilities from real/imag parts
    cvis = stf.extract(buf, xml['Refcal_Real']) + stf.extract(buf, xml['Refcal_Imag']) * 1j
    return {'pha': np.angle(cvis),
            'amp': np.absolute(cvis),
            'flag': stf.extract(buf, xml['Refcal_Flag']),
            'fghz': stf.extract(buf, xml['Fghz']),
            'sigma': stf.extract(buf, xml['Refcal_Sigma']),
            'timestamp': Time(stf.extract(buf, xml['Timestamp']), format='lv'),
            't_bg': Time(stf.extract(buf, xml['T_beg']), format='lv'),
            't_ed': Time(stf.extract(buf, xml['T_end']), format='lv')}
def unrot_refcal(refcal_in):
    ''' Apply feed-rotation correction to data read with rd_refcal(), returning updated data in
        the same format for further processing.
    '''
    import dbutil as db
    import copy
    import chan_util_bc as cu
    import cal_header as ch
    from stateframe import extract
    # Work on a copy so the caller's dictionary is not modified
    refcal = copy.deepcopy(refcal_in)
    # Read X-Y phase calibration (type 11) nearest the start of the first scan
    xml, buf = ch.read_cal(11, Time(refcal['times'][0][0], format='jd'))
    dph = extract(buf, xml['XYphase'])
    xi_rot = extract(buf, xml['Xi_Rot'])
    freq = extract(buf, xml['FGHz'])
    freq = freq[np.where(freq != 0)]  # Drop unused (zero) frequency slots
    # Map each calibration frequency to its band number
    band = []
    for f in freq:
        band.append(cu.freq2bdname(f))
    # sidx/eidx bracket the runs of frequencies belonging to each band
    bds, sidx = np.unique(band, return_index=True)
    nbd = len(bds)
    eidx = np.append(sidx[1:], len(band))
    dxy = np.zeros((14, 34), dtype=np.float)
    xi = np.zeros(34, dtype=np.float)
    fghz = np.zeros(34)
    # average dph and xi_rot frequencies within each band, to convert to 34-band representation
    for b, bd in enumerate(bds):
        fghz[bd - 1] = np.nanmean(freq[sidx[b]:eidx[b]])
        xi[bd - 1] = np.nanmean(xi_rot[sidx[b]:eidx[b]])
        for a in range(14):
            # Circular mean of the phases within the band (sum of unit phasors)
            dxy[a, bd - 1] = np.angle(np.sum(np.exp(1j * dph[a, sidx[b]:eidx[b]])))
    nscans = len(refcal['scanlist'])
    for i in range(nscans):
        # Read parallactic angles for this scan
        trange = Time([refcal['tstlist'][i].iso, refcal['tedlist'][i].iso])
        times, chi = db.get_chi(trange)
        tchi = times.jd
        t = refcal['times'][i]
        if len(t) > 0:
            vis = copy.deepcopy(refcal['vis'][i])
            idx = nearest_val_idx(t, tchi)
            pa = chi[idx]  # Parallactic angle for the times of this refcal.
            # Zero the angle for these antennas -- presumably equatorial
            # mounts that do not rotate with parallactic angle (TODO confirm)
            pa[:, [8, 9, 10, 12]] = 0.0
            nt = len(idx)  # Number of times in this refcal
            # Apply X-Y delay phase correction
            for a in range(13):
                # Phase corrections relative to the reference antenna (index 13)
                a1 = lobe(dxy[a] - dxy[13])
                a2 = -dxy[13] - xi
                a3 = dxy[a] - xi + np.pi
                for j in range(nt):
                    vis[a, 1, :, j] *= np.exp(1j * a1)
                    vis[a, 2, :, j] *= np.exp(1j * a2)
                    vis[a, 3, :, j] *= np.exp(1j * a3)
            # Rotate the four polarization products by the parallactic angle.
            # The corrected values are written back into refcal['vis'] while
            # the uncorrected copy in vis is left untouched, so statement
            # order within this loop matters.
            for j in range(nt):
                for a in range(13):
                    refcal['vis'][i][a, 0, :, j] = vis[a, 0, :, j] * np.cos(pa[j, a]) + vis[a, 3, :, j] * np.sin(pa[j, a])
                    refcal['vis'][i][a, 2, :, j] = vis[a, 2, :, j] * np.cos(pa[j, a]) + vis[a, 1, :, j] * np.sin(pa[j, a])
                    refcal['vis'][i][a, 3, :, j] = vis[a, 3, :, j] * np.cos(pa[j, a]) - vis[a, 0, :, j] * np.sin(pa[j, a])
                    refcal['vis'][i][a, 1, :, j] = vis[a, 1, :, j] * np.cos(pa[j, a]) - vis[a, 2, :, j] * np.sin(pa[j, a])
    return refcal
def rstntext2sql(startdt, enddt, logfile=None):
    """This routine extracts data from the old archive text file and writes it to SQL. It will output a list of dates that were not archived. The SQL time will be the date of the data at 0300. The program checks to see if there is current data already for the date range. It will NOT overwrite a record if it is already present. startdt and enddt are the dates that will be written to SQL from startdt up to but not including enddt."""
    data = rstnfluxfromtextarchive(startdt, enddt)
    if data is None:
        print "No RSTN data found in range ", startdt.iso, " to ", enddt.iso
        return
    print "Processing data from: ", startdt.iso, " to ", enddt.iso
    # processed[i] tracks whether day (offset + i) was found in the archive
    offset = int(np.floor(startdt.mjd))
    days = int(np.floor(enddt.mjd)) - offset
    processed = np.zeros((days), dtype=bool)
    recordswritten = 0
    existingrecords = 0
    for d in data:
        print "Processing Date: ", d[0].iso
        i = int(np.floor(d[0].mjd)) - offset
        processed[i] = True
        # SQL timestamp convention: 0300 UT on the date of the data
        sqltime = Time(np.floor(d[0].mjd) + 0.125, format='mjd')
        xml, buf = ch.read_cal(12, sqltime)
        if buf is not None:
            # A record came back, but read_cal may return an earlier record;
            # treat it as absent unless its date matches this one
            sqltime_read = Time(extract(buf, xml['SQL_timestamp']), format='lv')
            if np.floor(sqltime_read.mjd) != np.floor(sqltime.mjd):
                buf = None
        if buf is None:
            # No record for this date, so write one
            if ch.rstnflux2sql(d, sqltime):
                recordswritten += 1
                print "Record Written"
            else:
                print "Record Write Failed!"
        else:
            # Never overwrite an existing record
            print "Record Exists."
            existingrecords += 1
    if logfile is None:
        logfile = "/tmp/missingrstn.txt"
    # Log (as 0300 UT dates) any days in range missing from the text archive
    f = open(logfile, "w")
    for i in range(days):
        if not processed[i]:
            f.write(Time(float(i + offset) + 0.125, format='mjd').iso + "\n")
    f.close()
    print "Number of days searched: ", days
    print "Number of existing records: ", existingrecords
    print "Records Written: ", recordswritten
    print "Missing Records: ", days - (recordswritten + existingrecords)
def DCM_cal(filename=None,fseqfile='gainseq.fsq',dcmattn=None,missing='ant15',update=False):
    # Compute a DCM attenuation table from an ADC packet-capture file, by
    # comparing measured packet-header power levels against a target of 1600.
    # Returns the new table as a list of formatted text lines.
    # NOTE(review): the update parameter is not used in this visible body --
    # confirm whether writing to SQL was intended here.
    if filename is None:
        return 'Must specify ADC packet capture filename, e.g. "/dppdata1/PRT/PRT<yyyymmddhhmmss>adc.dat"'
    userpass = '******'
    # Fetch the frequency sequence file from the ACC ftp server
    fseq_handle = urllib2.urlopen('ftp://'+userpass+'acc.solar.pvt/parm/'+fseqfile,timeout=0.5)
    lines = fseq_handle.readlines()
    fseq_handle.close()
    for line in lines:
        if line.find('LIST:SEQUENCE') != -1:
            # Parse the comma-separated band list following the keyword
            line = line[14:]
            bandlist = np.array(map(int,line.split(',')))
    if len(np.unique(bandlist)) != 34:
        print 'Frequency sequence must contain all bands [1-34]'
        return None
    # Read packet capture file
    adc = p.rd_jspec(filename)
    # Keep only the first two power entries of the packet headers
    pwr = np.rollaxis(adc['phdr'],2)[:,:,:2]
    # Put measured power into uniform array arranged by band
    new_pwr = np.zeros((34,16,2))
    for i in range(34):
        idx, = np.where(bandlist-1 == i)
        if len(idx) > 0:
            # Median over repeated occurrences of this band in the sequence
            new_pwr[i] = np.median(pwr[idx],0)
    new_pwr.shape = (34,32)
    # Read table from the database.
    import cal_header
    import stateframe
    xml, buf = cal_header.read_cal(2)
    cur_table = stateframe.extract(buf,xml['Attenuation'])
    if dcmattn:
        # A DCM attenuation value was given, which presumes a constant value
        # so use it as the "original table."
        orig_table = np.zeros((34,30)) + dcmattn
        # orig_table[:,26:28] = 24
        orig_table[:,28:] = 0
    else:
        # No DCM attenuation value was given, so use current DCM master
        # table from the database.
        orig_table = cur_table
    # Attenuation change [dB] needed to bring power to the target value 1600;
    # the last two columns (Ant16) are dropped
    attn = np.log10(new_pwr[:,:-2]/1600.)*10.
    # Zero any changes for missing antennas, and override orig_table with cur_table for those antennas
    if missing:
        idx = p.ant_str2list(missing)
        # Each antenna occupies two adjacent columns (X and Y polarization)
        bad = np.sort(np.concatenate((idx*2,idx*2+1)))
        attn[:,bad] = 0
        orig_table[:,bad] = cur_table[:,bad]
    # Apply the change, clip to the valid 0-30 dB range, round to even steps
    new_table = (np.clip(orig_table + attn,0,30)/2).astype(int)*2
    DCMlines = []
    DCMlines.append('# Ant1 Ant2 Ant3 Ant4 Ant5 Ant6 Ant7 Ant8 Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15')
    DCMlines.append('# X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y')
    DCMlines.append('# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----')
    for band in range(1,35):
        DCMlines.append('{:2} : {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'.format(band,*new_table[band-1]))
    return DCMlines
def sql2refcal(t):
    '''Supply a timestamp in Time format, return the closest refcal data'''
    import cal_header as ch
    import stateframe as stf
    xml, buf = ch.read_cal(8, t=t)
    # Rebuild complex visibilities from the stored real and imaginary parts
    cvis = stf.extract(buf, xml['Refcal_Real']) + stf.extract(buf, xml['Refcal_Imag']) * 1j
    out = {}
    out['pha'] = np.angle(cvis)
    out['amp'] = np.absolute(cvis)
    out['flag'] = stf.extract(buf, xml['Refcal_Flag'])
    out['fghz'] = stf.extract(buf, xml['Fghz'])
    out['sigma'] = stf.extract(buf, xml['Refcal_Sigma'])
    out['timestamp'] = Time(stf.extract(buf, xml['Timestamp']), format='lv')
    out['t_bg'] = Time(stf.extract(buf, xml['T_beg']), format='lv')
    out['t_ed'] = Time(stf.extract(buf, xml['T_end']), format='lv')
    return out
def compare_tbl(tbl, t=None):
    ''' Compare the given table with that from the SQL database.

        Inputs:
          tbl    A text string version of a table as returned by DCM_calnew()
          t      An optional Time() object giving a date/time of the table to
                   compare to.  If omitted or None, the currently active table
                   is used.  Note, the exact date is not required.  Any date
                   after the desired SQL record will return the active table
                   for that date.

        Returns a copy of tbl whose data lines contain the differences
        (tbl - SQL table).
    '''
    import cal_header
    import stateframe
    from copy import deepcopy
    xml, buf = cal_header.read_cal(2, t=t)
    cur_table = stateframe.extract(buf,xml['Attenuation'])
    diftbl = deepcopy(tbl)  # Make a copy to update, to preserve structure
    for i in range(52):
        # Parse the 30 attenuation values from the table line (skipping the
        # leading row label).  Wrap map() in np.array so the elementwise
        # subtraction works on Python 3 as well, where map() returns an
        # iterator rather than a list.
        dif = np.array(list(map(int, tbl[i+3][4:].split()))) - cur_table[i]
        # NOTE(review): the row label here is the 0-based index i, whereas the
        # DCM table writers label rows 1-based -- confirm this is intended.
        diftbl[3+i] = '{:2} : {:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}'.format(i,*dif)
    return diftbl
def bandlist2dcmtable(bandlist, toACC=False):
    '''Use list of bands representing a frequency sequence, to set dcmtable.txt
       from the DCM_master_table, and return the lines of the table.
       Optionally, the table is sent to ACC.

       Input:
          bandlist   numpy 50-element integer array of band numbers, 1-34
          toACC      optional boolean.  If True, sends results to ACC and the
                       SQL database.  Default is False (does not send)
    '''
    import stateframe as stf
    import cal_header as ch
    from ftplib import FTP
    # Convert from 1-based bandlist to zero-based band numbers
    bands = bandlist - 1
    # Read master table from SQL server
    dcm, buf = ch.read_cal(2)
    dcm_m_attn = stf.extract(buf, dcm['Attenuation'])
    # Select the master-table row for each entry of the band sequence
    dcm_attn = dcm_m_attn[bands]
    lines = []
    # Write the table to a local file while also collecting the lines to return
    g = open('/tmp/DCM_table.txt', 'w')
    for line in dcm_attn:
        l = ' '.join(map(str, line))
        lines.append(l)
        g.write(l + '\n')
    g.close()
    if toACC:
        # Record the table in the SQL database
        ch.dcm_table2sql(lines)
        # Connect to ACC /parm directory and transfer scan_header files
        try:
            g = open('/tmp/DCM_table.txt', 'r')
            acc = FTP('acc.solar.pvt')
            acc.login('admin', 'observer')
            acc.cwd('parm')
            # Send DCM table lines to ACC
            print acc.storlines('STOR dcm.txt', g)
            g.close()
            print 'Successfully wrote dcm.txt to ACC'
        except:
            # Best-effort transfer: a failed FTP is reported but not fatal
            print 'Cannot FTP dcm.txt to ACC'
    return lines
def read_dbcalfac(t): ''' Read the contents of a SOLPNT calibration file from the SQL database, and return fghz, calfac, offsun arrays ''' import stateframe import cal_header as ch try: tp, buf = ch.read_cal(1,t) except: print 'READ_DBCALFAC: Error reading calibration factors' return None, None, None fghz = stateframe.extract(buf,tp['FGHz']) poln = stateframe.extract(buf,tp['Poln']) nf = len(fghz) npol = len(poln) nant = len(tp['Antenna']) calfac = np.zeros((npol,nf,nant),'float') offsun = np.zeros((npol,nf,nant),'float') for i,ant in enumerate(tp['Antenna']): calfac[:,:,i] = stateframe.extract(buf,ant['Calfac']) offsun[:,:,i] = stateframe.extract(buf,ant['Offsun']) idx = np.isfinite(calfac[0,:,0]) return fghz[idx], calfac[:,idx,:], offsun[:,idx,:]
def override(tbl, bandlist, t=None):
    ''' When a table has bad attenuation for a given band, this command will
        replace the new values with the ones in the current DCM_master_table.

        Inputs:
          tbl       A text string version of a table as returned by DCM_calnew()
          bandlist  A simple list of band numbers (1 - 52) that will be replaced.
          t         An optional Time() object giving a date/time of the table to
                      override from.  If omitted or None, the currently active
                      table is used.  Note, the exact date is not required.  Any
                      date after the desired SQL record will return the active
                      table for that date.
    '''
    import cal_header
    import stateframe
    xml, buf = cal_header.read_cal(2, t=t)
    cur_table = stateframe.extract(buf, xml['Attenuation'])
    # A single band number is accepted as well; promote it to a list of one
    if type(bandlist) is not list:
        bandlist = [bandlist]
    value_fmt = '{:3d}' * 30
    for band in bandlist:
        # Keep the 5-character row label, replace the 30 data values with the
        # corresponding row of the current master table
        tbl[band + 2] = tbl[band + 2][:5] + value_fmt.format(*cur_table[band - 1])
    return tbl
def fem_attn_update(fem_attn, t=None, rdfromsql=True):
    '''Given a record of the frontend attenuation levels from the stateframe,
       recalculate the corrected attenuations levels.
       fem_attn_in: recorded attn levels in a 10-min duration
       fem_attn_bitv: complex corrections to be applied to the data. Read from the stateframe or provided as a (16, 2, 2, 5) array'''
    import cal_header as ch
    import stateframe as stf
    if rdfromsql:
        # Read the per-bit complex corrections from the SQL database,
        # replacing any NaNs with zero
        xml, buf = ch.read_cal(7,t)
        fem_attn_bitv=np.nan_to_num(stf.extract(buf, xml['FEM_Attn_Real'])) + np.nan_to_num(stf.extract(buf, xml['FEM_Attn_Imag'])) * 1j
    # The two attenuators for each of the h and v channels
    h1=fem_attn['h1']
    h2=fem_attn['h2']
    v1=fem_attn['v1']
    v2=fem_attn['v2']
    # Stack into a single array: axis 2 selects polarization (h, v) and
    # axis 3 selects the attenuator (first, second)
    attn=np.concatenate((np.concatenate((h1[...,None],v1[...,None]),axis=2)[...,None], np.concatenate((h2[...,None],v2[...,None]),axis=2)[...,None]),axis=3)
    # Start with 0 attenuation as reference
    fem_attn_out=attn*0
    # Calculate resulting attenuation based on bit attn values (1,2,4,8,16)
    for i in range(5):
        # Isolate bit i of each recorded attenuation setting (0 or 1) and
        # accumulate the corresponding complex correction
        fem_attn_out = fem_attn_out + (np.bitwise_and(attn,2**i)>>i)*fem_attn_bitv[...,i]
    #fem_gain=10.**(-fem_gain_db/10.)
    return fem_attn_out
def read_dbcalfac(t): ''' Read the contents of a SOLPNT calibration file from the SQL database, and return fghz, calfac, offsun arrays ''' import stateframe import cal_header as ch try: tp, buf = ch.read_cal(1, t) except: print 'READ_DBCALFAC: Error reading calibration factors' return None, None, None fghz = stateframe.extract(buf, tp['FGHz']) poln = stateframe.extract(buf, tp['Poln']) nf = len(fghz) npol = len(poln) nant = len(tp['Antenna']) calfac = np.zeros((npol, nf, nant), 'float') offsun = np.zeros((npol, nf, nant), 'float') for i, ant in enumerate(tp['Antenna']): calfac[:, :, i] = stateframe.extract(buf, ant['Calfac']) offsun[:, :, i] = stateframe.extract(buf, ant['Offsun']) idx = np.isfinite(calfac[0, :, 0]) return fghz[idx], calfac[:, idx, :], offsun[:, idx, :]
def read_attncal(trange=None):
    ''' Given a timerange as a Time() object, read FEM attenuation
        records for each date from the SQL database, and return then as
        a list of attn dictionaries.  To get values for only a single
        day, the trange Time() object can have the same time repeated, or
        can be a single time.

        Returns a list of dictionaries, each pertaining to one of the days
        in trange, with keys defined as follows:
           'time':      The start time of the GAINCALTEST scan, as a Time() object
           'fghz':      The list of frequencies [GHz] at which attenuations are measured
           'attn':      The array of attenuations [dB] of size (nattn, nant, npol, nf),
                           where nattn = 8, nant = 13, npol = 2, and nf is variable
    '''
    import cal_header as ch
    import stateframe as stf
    if trange is None:
        trange = Time.now()
    # A 0-dimensional mjd means a single time was given, so use it as both the
    # start and end of the range.  (This replaces the former
    # "type(trange.mjd) == np.float" test: np.float was removed from numpy,
    # and the ndim test also works when mjd is a numpy scalar.)
    if np.ndim(trange.mjd) == 0:
        mjd1 = int(trange.mjd)
        mjd2 = mjd1
    else:
        mjd1, mjd2 = trange.mjd.astype(int)
    attn = []
    for mjd in range(mjd1, mjd2 + 1):
        # Read next earlier SQL entry from end of given UT day (mjd+0.999)
        xml, buf = ch.read_cal(7, t=Time(mjd + 0.999, format='mjd'))
        t = Time(stf.extract(buf, xml['Timestamp']), format='lv')
        fghz = stf.extract(buf, xml['FGHz'])
        # Trailing zeros in FGHz mark unused slots; trim them off
        nf = len(np.where(fghz != 0.0)[0])
        fghz = fghz[:nf]
        attnvals = stf.extract(buf, xml['FEM_Attn_Real'])[:, :, :, :nf]
        attn.append({'time': t, 'fghz': fghz, 'attn': attnvals})
    return attn
def find_calibrations(year, month):
    # Build an HTML calendar for the given month, annotating each day cell
    # with three 0/1 flags indicating the presence of refcal (r), phasecal (p)
    # and total-power (tp) calibration records in the SQL database.
    import calendar
    from util import Time
    import cal_header as ch
    import stateframe
    hc = calendar.HTMLCalendar(calendar.SUNDAY)
    html_table = hc.formatmonth(year, month)
    lines = html_table.split('\n')
    # Widen the day-of-week header cells of the generated table
    lines[2] = lines[2].replace('th class', 'th width="100" class')
    c = calendar.TextCalendar(calendar.SUNDAY)
    for i in c.itermonthdays(year, month):
        if i != 0:  # itermonthdays yields 0 for cells of adjacent months
            t = Time(str(year) + '-' + str(month) + '-' + str(i) + ' 20:00')
            cals = []
            # Calibration types: 8 = refcal, 9 = phasecal, 10 = total power
            for caltype in [8, 9, 10]:
                xml, buf = ch.read_cal(caltype, t)
                if buf is None:
                    cals.append(0)
                else:
                    tout = Time(stateframe.extract(buf, xml['Timestamp']), format='lv')
                    # Count the record only if its timestamp is within 1/3 day
                    # (8 hours) before the 20:00 UT reference time
                    if (t - tout).value < 1. / 3:
                        cals.append(1)
                    else:
                        cals.append(0)
            # Find the calendar cell containing this day number and splice the
            # three flags into it
            for k, line in enumerate(lines[3:]):
                idx = line.find(str(i))
                ns = len(str(i))
                if idx != -1:
                    line = line[:idx + ns] + '<br>-r- -p- -tp- <br> {} {} {}'.format(
                        *cals) + line[idx + ns:]
                    break
            # k is the index at which the inner loop broke (or the last index)
            lines[k + 3] = line
    #print ''.join(line+'\n' for line in lines)
    return ''.join(line + '\n' for line in lines)
def get_calfac(t=None):
    ''' Read total power and auto-correlation calibration factors from the SQL
        database, for the time specified by Time() object t, or if None, at the
        next earlier calibration time to the current time.

        Returns a dict with keys 'fghz', 'timestamp', 'tpcalfac', 'accalfac',
        'tpoffsun' and 'acoffsun', where the four factor arrays have shape
        (13, 2, nf) = (nant, npol, nfreq).
    '''
    tpcal_type = 10  # Calibration type specified in cal_header.py
    if t is None:
        t = Time.now()
    xml, buf = ch.read_cal(tpcal_type,t=t)
    fghz = stateframe.extract(buf,xml['FGHz'])
    nf = len(fghz)
    # Use the builtin float dtype: np.float was a deprecated alias for it and
    # has been removed in recent numpy versions.
    tpcalfac = np.zeros((13,2,nf),float)
    tpoffsun = np.zeros((13,2,nf),float)
    accalfac = np.zeros((13,2,nf),float)
    acoffsun = np.zeros((13,2,nf),float)
    nant = len(xml['Antenna'])
    for i in range(nant):
        # Antnum in the SQL record is 1-based; convert to 0-based index
        iant = stateframe.extract(buf,xml['Antenna'][i]['Antnum'])-1
        tpcalfac[iant] = stateframe.extract(buf,xml['Antenna'][i]['TPCalfac'])
        accalfac[iant] = stateframe.extract(buf,xml['Antenna'][i]['ACCalfac'])
        tpoffsun[iant] = stateframe.extract(buf,xml['Antenna'][i]['TPOffsun'])
        acoffsun[iant] = stateframe.extract(buf,xml['Antenna'][i]['ACOffsun'])
    return {'fghz':fghz,'timestamp':stateframe.extract(buf,xml['Timestamp']),
            'tpcalfac':tpcalfac,'accalfac':accalfac,'tpoffsun':tpoffsun,'acoffsun':acoffsun}
def apply_attn_corr(data, tref=None):
    ''' Applys the attenuator state corrections to the given data dictionary,
        corrected to the gain-state at time given by Time() object tref.

        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          tref     A Time() object with the reference time, or if None,
                     the gain state of the nearest earlier REFCAL is used.

        Output:
          cdata    A dictionary with the gain-corrected data.
                     The keys px, py, and x, are updated.

        NB: This is the same routine as in gaincal2.py, but modified
        to handle the different ordering/format of data from udb_util.py's
        readXdata() routine.
    '''
    from gaincal2 import get_gain_state
    from util import common_val_idx, nearest_val_idx
    import copy
    if tref is None:
        # No reference time specified, so get nearest earlier REFCAL
        trange = Time(data['time'][[0, -1]], format='jd')
        xml, buf = ch.read_cal(8, t=trange[0])
        tref = Time(stateframe.extract(buf, xml['Timestamp']), format='lv')
    # Get the gain state at the reference time (actually median over 1 minute)
    trefrange = Time([tref.iso, Time(tref.lv + 60, format='lv').iso])
    ref_gs = get_gain_state(trefrange)  # refcal gain state for 60 s
    # Get median of refcal gain state (which should be constant anyway)
    ref_gs['h1'] = np.median(ref_gs['h1'], 1)
    ref_gs['h2'] = np.median(ref_gs['h2'], 1)
    ref_gs['v1'] = np.median(ref_gs['v1'], 1)
    ref_gs['v2'] = np.median(ref_gs['v2'], 1)
    # Get timerange from data
    trange = Time([data['time'][0], data['time'][-1]], format='jd')
    # Get time cadence in seconds.  Uses the builtin int(): np.int was a
    # deprecated alias for it and has been removed in recent numpy versions.
    dt = int(np.round(np.median(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1:
        dt = None
    # Get the gain state of the requested timerange
    src_gs = get_gain_state(trange, dt)  # solar gain state for timerange of file
    nt = len(src_gs['times'])
    antgain = np.zeros((15, 2, 34, nt), np.float32)  # Antenna-based gains vs. band
    for i in range(15):
        for j in range(34):
            antgain[i, 0, j] = src_gs['h1'][i] + src_gs['h2'][i] - ref_gs['h1'][i] - ref_gs['h2'][i] + src_gs['dcmattn'][i, 0, j] - ref_gs['dcmattn'][i, 0, j]
            antgain[i, 1, j] = src_gs['v1'][i] + src_gs['v2'][i] - ref_gs['v1'][i] - ref_gs['v2'][i] + src_gs['dcmattn'][i, 1, j] - ref_gs['dcmattn'][i, 1, j]
    cdata = copy.deepcopy(data)
    # Create giant array of baseline-based gains, translated to baselines and frequencies
    fghz = data['fghz']
    nf = len(fghz)
    blist = (fghz * 2 - 1).astype(int) - 1  # Band list corresponding to frequencies in data
    blgain = np.zeros((nf, 136, 4, nt), float)  # Baseline-based gains vs. frequency
    for k, bl in enumerate(get_bl_order()):
        i, j = bl
        if i < 15 and j < 15:
            # Gains [dB] add across the two antennas of each baseline;
            # convert to amplitude factors (hence the /20)
            blgain[:, k, 0] = 10**((antgain[i, 0, blist] + antgain[j, 0, blist]) / 20.)
            blgain[:, k, 1] = 10**((antgain[i, 1, blist] + antgain[j, 1, blist]) / 20.)
            blgain[:, k, 2] = 10**((antgain[i, 0, blist] + antgain[j, 1, blist]) / 20.)
            blgain[:, k, 3] = 10**((antgain[i, 1, blist] + antgain[j, 0, blist]) / 20.)
    # Reorder antgain axes to put frequencies in first slot, to match data
    antgain = np.swapaxes(np.swapaxes(antgain, 1, 2), 0, 1)
    antgainf = 10**(antgain[blist] / 10.)  # Power factor for single-antenna data
    idx = nearest_val_idx(data['time'], src_gs['times'].jd)
    # Correct the auto- and cross-correlation data
    cdata['x'] *= blgain[:, :, :, idx]
    # Reshape px and py arrays
    cdata['px'].shape = (134, 16, 3, nt)
    cdata['py'].shape = (134, 16, 3, nt)
    # Correct the power
    cdata['px'][:, :15, 0] *= antgainf[:, :, 0, idx]
    cdata['py'][:, :15, 0] *= antgainf[:, :, 1, idx]
    # Correct the power-squared
    cdata['px'][:, :15, 1] *= antgainf[:, :, 0, idx]**2
    cdata['py'][:, :15, 1] *= antgainf[:, :, 1, idx]**2
    # Reshape px and py arrays back to original
    cdata['px'].shape = (134 * 16 * 3, nt)
    cdata['py'].shape = (134 * 16 * 3, nt)
    return cdata
def apply_attn_corr(data, tref=None):
    ''' Applys the attenuator state corrections to the given data dictionary,
        corrected to the gain-state at time given by Time() object tref.

        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          tref     A Time() object with the reference time, or if None,
                     the gain state of the nearest earlier REFCAL is used.

        Output:
          cdata    A dictionary with the gain-corrected data.
                     The keys px, py, and x, are updated.

        NB: This is the same routine as in gaincal2.py, but modified
        to handle the different ordering/format of data from udb_util.py's
        readXdata() routine.
    '''
    from gaincal2 import get_gain_state
    from util import common_val_idx, nearest_val_idx
    import copy
    if tref is None:
        # No reference time specified, so get nearest earlier REFCAL
        trange = Time(data['time'][[0,-1]],format='jd')
        xml, buf = ch.read_cal(8,t=trange[0])
        tref = Time(stateframe.extract(buf,xml['Timestamp']),format='lv')
    # Get the gain state at the reference time (actually median over 1 minute)
    trefrange = Time([tref.iso,Time(tref.lv+60,format='lv').iso])
    ref_gs = get_gain_state(trefrange)  # refcal gain state for 60 s
    # Get median of refcal gain state (which should be constant anyway)
    ref_gs['h1'] = np.median(ref_gs['h1'],1)
    ref_gs['h2'] = np.median(ref_gs['h2'],1)
    ref_gs['v1'] = np.median(ref_gs['v1'],1)
    ref_gs['v2'] = np.median(ref_gs['v2'],1)
    # Get timerange from data
    trange = Time([data['time'][0],data['time'][-1]],format='jd')
    # Get time cadence in seconds.  Uses the builtin int(): np.int was a
    # deprecated alias for it and has been removed in recent numpy versions.
    dt = int(np.round(np.median(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1:
        dt = None
    # Get the gain state of the requested timerange
    src_gs = get_gain_state(trange,dt)  # solar gain state for timerange of file
    nt = len(src_gs['times'])
    antgain = np.zeros((15,2,34,nt),np.float32)  # Antenna-based gains vs. band
    for i in range(15):
        for j in range(34):
            antgain[i,0,j] = src_gs['h1'][i] + src_gs['h2'][i] - ref_gs['h1'][i] - ref_gs['h2'][i] + src_gs['dcmattn'][i,0,j] - ref_gs['dcmattn'][i,0,j]
            antgain[i,1,j] = src_gs['v1'][i] + src_gs['v2'][i] - ref_gs['v1'][i] - ref_gs['v2'][i] + src_gs['dcmattn'][i,1,j] - ref_gs['dcmattn'][i,1,j]
    cdata = copy.deepcopy(data)
    # Create giant array of baseline-based gains, translated to baselines and frequencies
    fghz = data['fghz']
    nf = len(fghz)
    blist = (fghz*2 - 1).astype(int) - 1  # Band list corresponding to frequencies in data
    blgain = np.zeros((nf,136,4,nt),float)  # Baseline-based gains vs. frequency
    for k,bl in enumerate(get_bl_order()):
        i, j = bl
        if i < 15 and j < 15:
            # Gains [dB] add across the two antennas of each baseline;
            # convert to amplitude factors (hence the /20)
            blgain[:,k,0] = 10**((antgain[i,0,blist] + antgain[j,0,blist])/20.)
            blgain[:,k,1] = 10**((antgain[i,1,blist] + antgain[j,1,blist])/20.)
            blgain[:,k,2] = 10**((antgain[i,0,blist] + antgain[j,1,blist])/20.)
            blgain[:,k,3] = 10**((antgain[i,1,blist] + antgain[j,0,blist])/20.)
    # Reorder antgain axes to put frequencies in first slot, to match data
    antgain = np.swapaxes(np.swapaxes(antgain,1,2),0,1)
    antgainf = 10**(antgain[blist]/10.)  # Power factor for single-antenna data
    idx = nearest_val_idx(data['time'],src_gs['times'].jd)
    # Correct the auto- and cross-correlation data
    cdata['x'] *= blgain[:,:,:,idx]
    # Reshape px and py arrays
    cdata['px'].shape = (134,16,3,nt)
    cdata['py'].shape = (134,16,3,nt)
    # Correct the power
    cdata['px'][:,:15,0] *= antgainf[:,:,0,idx]
    cdata['py'][:,:15,0] *= antgainf[:,:,1,idx]
    # Correct the power-squared
    cdata['px'][:,:15,1] *= antgainf[:,:,0,idx]**2
    cdata['py'][:,:15,1] *= antgainf[:,:,1,idx]**2
    # Reshape px and py arrays back to original
    cdata['px'].shape = (134*16*3,nt)
    cdata['py'].shape = (134*16*3,nt)
    return cdata
def unrot(data, azeldict=None):
    ''' Apply the correction to differential feed rotation to data, and return
        the corrected data.

        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          azeldict The dictionary returned from get_sql_info(), or if None, the
                     appropriate get_sql_info() call is done internally.

        Output:
          cdata    A dictionary with the phase-corrected data.  Only the key
                     x is updated.
    '''
    import copy
    from util import lobe
    trange = Time(data['time'][[0, -1]], format='jd')
    if azeldict is None:
        azeldict = get_sql_info(trange)
    chi = azeldict['ParallacticAngle']  # (nt, nant)
    # Correct parallactic angle for equatorial mounts, relative to Ant14
    for i in [8, 9, 10, 12, 13]:
        chi[:, i] -= chi[:, 13]
    # Ensure that nearest valid parallactic angle is used for times in the data
    good, = np.where(azeldict['ActualAzimuth'][0] != 0)
    tidx = nearest_val_idx(data['time'], azeldict['Time'][good].jd)
    # Read X-Y Delay phase from SQL database and get common frequencies
    xml, buf = ch.read_cal(11, t=trange[0])
    fghz = stateframe.extract(buf, xml['FGHz'])
    good, = np.where(fghz != 0.)  # Trailing zeros mark unused frequency slots
    fghz = fghz[good]
    dph = stateframe.extract(buf, xml['XYphase'])
    dph = dph[:, good]
    fidx1, fidx2 = common_val_idx(data['fghz'], fghz, precision=4)
    # Data frequencies that have no matching X-Y phase calibration entry
    missing = np.setdiff1d(np.arange(len(data['fghz'])), fidx1)
    nf, nbl, npol, nt = data['x'].shape
    nf = len(fidx1)
    # Correct data for X-Y delay phase
    for k, bl in enumerate(get_bl_order()):
        i, j = bl
        if i < 14 and j < 14 and i != j:
            a1 = lobe(dph[i, fidx2] - dph[j, fidx2])
            a2 = -dph[j, fidx2] + np.pi / 2
            a3 = dph[i, fidx2] - np.pi / 2
            data['x'][fidx1, k, 1] *= np.repeat(np.exp(1j * a1), nt).reshape(nf, nt)
            data['x'][fidx1, k, 2] *= np.repeat(np.exp(1j * a2), nt).reshape(nf, nt)
            data['x'][fidx1, k, 3] *= np.repeat(np.exp(1j * a3), nt).reshape(nf, nt)
    # Correct data for differential feed rotation
    cdata = copy.deepcopy(data)
    for n in range(nt):
        for k, bl in enumerate(get_bl_order()):
            i, j = bl
            if i < 14 and j < 14 and i != j:
                dchi = chi[n, i] - chi[n, j]
                cchi = np.cos(dchi)
                schi = np.sin(dchi)
                cdata['x'][:, k, 0, n] = data['x'][:, k, 0, n] * cchi + data['x'][:, k, 3, n] * schi
                cdata['x'][:, k, 2, n] = data['x'][:, k, 2, n] * cchi + data['x'][:, k, 1, n] * schi
                cdata['x'][:, k, 3, n] = data['x'][:, k, 3, n] * cchi - data['x'][:, k, 0, n] * schi
                cdata['x'][:, k, 1, n] = data['x'][:, k, 1, n] * cchi - data['x'][:, k, 2, n] * schi
    # Set flags for any missing frequencies.
    # BUGFIX: the original line was "cdata[missing] = np.ma.masked", which
    # indexes the *dictionary* with an ndarray and raises TypeError (ndarray
    # is unhashable).  The intent, per the original comment, was to mask the
    # missing frequency channels of the visibility data itself.
    if len(missing) > 0:
        cdata['x'][missing] = np.ma.masked
    return cdata
def DCM_master_attn_cal(fseqfile=None, dcmattn=None, update=False):
    ''' New version of this command, which uses the power values in
        the 10gbe packet headers instead of the very slow measurement
        of the ADC levels themselves.  This version only takes about 8 s!

        If update is True, it writes the results to the SQL database.

        Returns the DCM_master_table in the form of lines of text
        strings, with labels (handy for viewing).
    '''
    import pcapture2 as p
    import dbutil as db
    import cal_header as ch
    import stateframe as stf
    bandlist = fseqfile2bandlist(fseqfile)
    if bandlist is None:
        print 'Must specify a frequency sequence.'
        return None
    # Make sure this sequence is actually running, or start it if not
    accini = stf.rd_ACCfile()
    if not fseq_is_running(fseqfile, accini):
        # Sequence is not running, so send ACC commands to start it
        send_cmds(['FSEQ-OFF'], accini)
        send_cmds(['FSEQ-INIT'], accini)
        send_cmds(['FSEQ-FILE ' + fseqfile], accini)
        send_cmds(['FSEQ-ON'], accini)
        bandlist2dcmtable(bandlist, toACC=True)
        # Allow time for the sequence to start before re-checking
        time.sleep(3)
        if not fseq_is_running(fseqfile, accini):
            print 'Frequency sequence could not be started.'
            return None
        else:
            print 'Successfully started frequency sequence.'
        # Load the just-transferred DCM table and enable auto attenuation
        send_cmds(['dcmtable dcm.txt'], accini)
        send_cmds(['dcmauto-on'], accini)
    # Power per (slot, board, channel) accumulated from the packet headers
    pwr = np.zeros((50, 8, 4), 'int')
    # Capture on eth2 interface
    command = 'tcpdump -i eth2 -c 155000 -w /home/user/Python/dcm2.pcap -s 1000'
    p.sendcmd(command)
    # Capture on eth3 interface
    command = 'tcpdump -i eth3 -c 155000 -w /home/user/Python/dcm3.pcap -s 1000'
    p.sendcmd(command)
    headers = p.list_header('/home/user/Python/dcm2.pcap')
    for line in headers:
        try:
            # Columns 0,3,6-9 are slot, board id and the four power values
            j, id, p1, p2, p3, p4 = np.array(map(
                int, line.split()))[[0, 3, 6, 7, 8, 9]]
            pwr[j, id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    headers = p.list_header('/home/user/Python/dcm3.pcap')
    for line in headers:
        try:
            j, id, p1, p2, p3, p4 = np.array(map(
                int, line.split()))[[0, 3, 6, 7, 8, 9]]
            pwr[j, id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    # Reshape to (slot, nant, npol)
    pwr.shape = (50, 16, 2)
    # # Read current frequency sequence from database
    # cursor = db.get_cursor()
    # query = 'select top 50 FSeqList from hV37_vD50 order by Timestamp desc'
    # fseq, msg = db.do_query(cursor, query)
    # if msg == 'Success':
    #     fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
    #     bandlist = ((np.array(fseqlist)-0.44)*2).astype(int)
    # cursor.close()
    if dcmattn is None:
        # Read current DCM_master_table from database
        xml, buf = ch.read_cal(2)
        orig_table = stf.extract(buf, xml['Attenuation'])
    else:
        # DCM attenuation is set to a constant value so create a table of such values.
        orig_table = np.zeros((34, 30)) + dcmattn
        orig_table[:, 26:] = 0
    # Order pwr values according to bandlist, taking median of any repeated values
    new_pwr = np.zeros((34, 16, 2))
    for i in range(34):
        idx, = np.where(bandlist - 1 == i)
        if len(idx) > 0:
            new_pwr[i] = np.median(pwr[idx], 0)
    new_pwr.shape = (34, 32)
    # Now determine the change in attenuation needed to achieve a target
    # value of 1600.  Eliminate last two entries, corresponding to Ant16
    attn = np.log10(new_pwr[:, :-2] / 1600.) * 10.
    # Apply the change, clip to the valid 0-30 dB range, round to even steps
    new_table = (np.clip(orig_table + attn, 0, 30) / 2).astype(int) * 2
    DCMlines = []
    DCMlines.append(
        '# Ant1 Ant2 Ant3 Ant4 Ant5 Ant6 Ant7 Ant8 Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15'
    )
    DCMlines.append(
        '# X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y'
    )
    DCMlines.append(
        '# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----'
    )
    for band in range(1, 35):
        DCMlines.append(
            '{:2} : {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'
            .format(band, *new_table[band - 1]))
    if update:
        msg = ch.dcm_master_table2sql(DCMlines)
        if msg:
            print 'Success'
        else:
            print 'Error writing table to SQL database!'
    return DCMlines
def cal_qual(t=None, savfig=True):
    ''' Check the quality of the total power and gain calibrations for a
        given date.

        Reads the current TP calibration (caltype 10) from SQL, locates the
        GAINCALTEST and SOLPNTCAL scans for the date, runs them through the
        pipeline correction, and produces two multi-panel figures comparing
        the calibrated total-power and cross-power spectra against RSTN flux.

        Inputs:
          t       Time() object giving the date to check, or None to use now.
          savfig  If True, the two figures are saved as .png files.
    '''
    import cal_header as ch
    from stateframe import extract
    import dump_tsys as dt
    import pipeline_cal as pc
    import matplotlib.pylab as plt
    import rstn
    from util import get_idbdir
    import socket
    if t is None:
        t = Time.now()
    mjd = t.mjd
    # First check whether the total power calibration is current
    caltype = 10
    xml, buf = ch.read_cal(caltype, t=t)
    tp_mjd = Time(extract(buf, xml['SQL_timestamp']), format='lv').mjd
    # A TP record older than half a day is considered stale for this date
    if mjd - tp_mjd > 0.5:
        print 'CAL_QUAL: Warning, TP Calibration not (yet) available for this date.'
    # Find GCAL scan for this date
    fdb = dt.rd_fdb(Time(mjd, format='mjd'))
    gcidx, = np.where(fdb['PROJECTID'] == 'GAINCALTEST')
    if len(gcidx) == 1:
        datadir = get_idbdir(t) + fdb['FILE'][gcidx][0][3:11] + '/'
        # List of GCAL files
        gcalfile = [datadir + i for i in fdb['FILE'][gcidx]]
    else:
        # No GAINCALTEST today -- fall back to yesterday's scan
        print 'CAL_QUAL: Warning, no GAINCALTEST scan for this date. Will try using the GAINCALTEST from previous day.'
        fdb = dt.rd_fdb(Time(mjd - 1, format='mjd'))
        gcidx, = np.where(fdb['PROJECTID'] == 'GAINCALTEST')
        if len(gcidx) == 1:
            datadir = get_idbdir(t)
            # Add date path if on pipeline
            # if datadir.find('eovsa') != -1: datadir += fdb['FILE'][gcidx][0][3:11]+'/'
            host = socket.gethostname()
            if host == 'pipeline':
                datadir += fdb['FILE'][gcidx][0][3:11] + '/'
            # List of GCAL files
            gcalfile = [datadir + i for i in fdb['FILE'][gcidx]]
        else:
            print 'CAL_QUAL: Error, no GAINCALTEST scan for previous day.'
            return
    # Find SOLPNTCAL scan for this date
    fdb = dt.rd_fdb(Time(mjd, format='mjd'))
    gcidx, = np.where(fdb['PROJECTID'] == 'SOLPNTCAL')
    if len(gcidx) > 0:
        datadir = get_idbdir(t)
        # Add date path if on pipeline
        # if datadir.find('eovsa') != -1: datadir += fdb['FILE'][gcidx][0][3:11]+'/'
        host = socket.gethostname()
        if host == 'pipeline':
            datadir += fdb['FILE'][gcidx][0][3:11] + '/'
        # List of SOLPNTCAL files
        solpntfile = [datadir + i for i in fdb['FILE'][gcidx]]
    else:
        print 'CAL_QUAL: Error, no SOLPNTCAL scan(s) for this date.'
        return
    # Run all scans through the pipeline correction before comparison
    files = gcalfile + solpntfile
    outnames = []
    for file in files:
        outnames.append(
            pc.udb_corr(file, calibrate=True, attncal=True, desat=True))
    out = ri.read_idb(outnames, srcchk=False)
    nt = len(out['time'])
    nf = len(out['fghz'])
    tpfac = 500. / nf  # Scale factor for overplotting spectra on the images
    # RSTN noon flux interpolated to the observed frequencies
    frq, flux = rstn.rd_rstnflux(t)
    s = rstn.rstn2ant(frq, flux, out['fghz'] * 1000., t)
    fluximg = s.repeat(nt).reshape(nf, nt)
    # --- Figure 1: total power ('p') vs. RSTN flux ---
    f, ax = plt.subplots(4, 7)
    f.set_size_inches(16, 7, forward=True)
    f.tight_layout(rect=[0.0, 0.0, 1, 0.95])
    ax.shape = (2, 14)  # Reorganize panels as (pol, antenna) with col 13 for RSTN
    for i in range(13):
        for j in range(2):
            ax[j, i].imshow(out['p'][i, j],
                            aspect='auto',
                            origin='lower',
                            vmax=np.max(s),
                            vmin=0)
            # Overplot lightcurves at 1/3 and 2/3 of the frequency range
            ax[j, i].plot(np.clip(out['p'][i, j, int(nf / 3.)] / tpfac, 0, nf),
                          linewidth=1)
            ax[j, i].plot(np.clip(out['p'][i, j, int(2 * nf / 3.)] / tpfac, 0,
                                  nf),
                          linewidth=1)
            ax[j, i].set_title('Ant ' + str(i + 1) + [' X Pol', ' Y Pol'][j],
                               fontsize=10)
    for j in range(2):
        ax[j, 13].imshow(fluximg,
                         aspect='auto',
                         origin='lower',
                         vmax=np.max(s),
                         vmin=0)
        ax[j, 13].set_title('RSTN Flux', fontsize=10)
    for i in range(13):
        for j in range(2):
            # Dashed RSTN reference curves at the same two frequencies
            ax[j, i].plot(np.clip(fluximg[int(nf / 3.)] / tpfac, 0, nf),
                          '--',
                          linewidth=1,
                          color='C0')
            ax[j, i].plot(np.clip(fluximg[int(2 * nf / 3.)] / tpfac, 0, nf),
                          '--',
                          linewidth=1,
                          color='C1')
    f.suptitle('Total Power Calibration Quality for ' + t.iso[:10])
    date = t.iso[:10].replace('-', '')
    if savfig:
        try:
            plt.savefig('/common/webplots/flaremon/daily/' + date[:4] +
                        '/QUAL_' + date + 'TP.png')
        except:
            plt.savefig('/tmp/' + date[:4] + '/QUAL_' + date + 'TP.png')
            print 'The .png file could not be created in the /common/webplots/flaremon/daily/ folder.'
            print 'A copy was created in /tmp/.'
    # --- Figure 2: autocorrelation real part ('a') vs. RSTN flux ---
    f, ax = plt.subplots(4, 7)
    f.set_size_inches(16, 7, forward=True)
    f.tight_layout(rect=[0.0, 0.0, 1, 0.95])
    ax.shape = (2, 14)
    for i in range(13):
        for j in range(2):
            ax[j, i].imshow(np.real(out['a'][i, j]),
                            aspect='auto',
                            origin='lower',
                            vmax=np.max(s),
                            vmin=0)
            ax[j, i].plot(np.clip(np.real(out['a'][i, j, int(nf / 3.)] / tpfac),
                                  0, nf),
                          linewidth=1)
            ax[j, i].plot(np.clip(
                np.real(out['a'][i, j, int(2 * nf / 3.)] / tpfac), 0, nf),
                          linewidth=1)
            ax[j, i].set_title('Ant ' + str(i + 1) + [' X Pol', ' Y Pol'][j],
                               fontsize=10)
    for j in range(2):
        ax[j, 13].imshow(fluximg,
                         aspect='auto',
                         origin='lower',
                         vmax=np.max(s),
                         vmin=0)
        ax[j, 13].set_title('RSTN Flux', fontsize=10)
    for i in range(13):
        for j in range(2):
            ax[j, i].plot(np.clip(fluximg[int(nf / 3.)] / tpfac, 0, nf),
                          '--',
                          linewidth=1,
                          color='C0')
            ax[j, i].plot(np.clip(fluximg[int(2 * nf / 3.)] / tpfac, 0, nf),
                          '--',
                          linewidth=1,
                          color='C1')
    f.suptitle('Cross-Power Calibration Quality for ' + t.iso[:10])
    date = t.iso[:10].replace('-', '')
    if savfig:
        try:
            plt.savefig('/common/webplots/flaremon/daily/' + date[:4] +
                        '/QUAL_' + date + 'XP.png')
        except:
            plt.savefig('/tmp/' + date[:4] + '/QUAL_' + date + 'XP.png')
            print 'The .png file could not be created in the /common/webplots/flaremon/daily/ folder.'
            print 'A copy was created in /tmp/.'
def apply_gain_corr(data, tref=None):
    ''' Applys the gain_state() corrections to the given data dictionary,
        corrected to the gain-state at time given by Time() object tref.

        Inputs:
          data     A dictionary such as that returned by read_idb().
          tref     A Time() object with the reference time, or if None,
                     the gain state of the nearest earlier REFCAL is
                     used.

        Output:
          cdata    A dictionary with the gain-corrected data.  The keys
                     p, x, p2, and a are all updated.
    '''
    from util import common_val_idx, nearest_val_idx
    import copy
    if tref is None:
        # No reference time specified, so get nearest earlier REFCAL
        trange = Time(data['time'][[0, -1]], format='jd')
        xml, buf = ch.read_cal(8, t=trange[0])
        if xml == {}:
            # No refcal for this date, so just use an early time as reference
            tref = Time(trange[0].iso[:10] + ' 13:30')
        else:
            tref = Time(stf.extract(buf, xml['Timestamp']), format='lv')
    # Get the gain state at the reference time (actually median over 1 minute)
    trefrange = Time([tref.iso, Time(tref.lv + 61, format='lv').iso])
    ref_gs = get_gain_state(trefrange)  # refcal gain state for 60 s
    # Get median of refcal gain state (which should be constant anyway)
    ref_gs['h1'] = np.median(ref_gs['h1'], 1)
    ref_gs['h2'] = np.median(ref_gs['h2'], 1)
    ref_gs['v1'] = np.median(ref_gs['v1'], 1)
    ref_gs['v2'] = np.median(ref_gs['v2'], 1)
    # Get timerange from data
    trange = Time([data['time'][0], data['time'][-1]], format='jd')
    # Get time cadence (median time step of the data, in whole seconds)
    dt = np.int(
        np.round(np.median(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1:
        # 1-s data is full cadence, so no averaging is requested
        dt = None
    # Get the gain state of the requested timerange
    src_gs = get_gain_state(trange, dt)  # solar gain state for timerange of file
    nt = len(src_gs['times'])
    antgain = np.zeros((15, 2, 34, nt), np.float32)  # Antenna-based gains vs. band
    # Gain difference (dB): FEM attn 1 + attn 2 + DCM attn, source minus reference
    for i in range(15):
        for j in range(34):
            antgain[i, 0, j] = src_gs['h1'][i] + src_gs['h2'][i] - ref_gs[
                'h1'][i] - ref_gs['h2'][i] + src_gs['dcmattn'][
                    i, 0, j] - ref_gs['dcmattn'][i, 0, j]
            antgain[i, 1, j] = src_gs['v1'][i] + src_gs['v2'][i] - ref_gs[
                'v1'][i] - ref_gs['v2'][i] + src_gs['dcmattn'][
                    i, 1, j] - ref_gs['dcmattn'][i, 1, j]
    cdata = copy.deepcopy(data)
    # Frequency list is provided, so produce baseline-based gain table as well
    # Create giant array of gains, translated to baselines and frequencies
    fghz = data['fghz']
    nf = len(fghz)
    # Map each frequency to its band index (0-based)
    blist = (fghz * 2 - 1).astype(int) - 1
    blgain = np.zeros((120, 4, nf, nt),
                      float)  # Baseline-based gains vs. frequency
    # Baseline gain is antenna-pair sum in dB, converted to amplitude (dB/20)
    for i in range(14):
        for j in range(i + 1, 15):
            blgain[ri.bl2ord[i, j],
                   0] = 10**((antgain[i, 0, blist] + antgain[j, 0, blist]) /
                             20.)
            blgain[ri.bl2ord[i, j],
                   1] = 10**((antgain[i, 1, blist] + antgain[j, 1, blist]) /
                             20.)
            blgain[ri.bl2ord[i, j],
                   2] = 10**((antgain[i, 0, blist] + antgain[j, 1, blist]) /
                             20.)
            blgain[ri.bl2ord[i, j],
                   3] = 10**((antgain[i, 1, blist] + antgain[j, 0, blist]) /
                             20.)
    # Antenna power gain as a factor (dB/10)
    antgainf = 10**(antgain[:, :, blist] / 10.)
    #idx1, idx2 = common_val_idx(data['time'],src_gs['times'].jd)
    idx = nearest_val_idx(data['time'], src_gs['times'].jd)
    # Apply corrections (some times may be eliminated from the data)
    # Correct the cross-correlation data
    cdata['x'] *= blgain[:, :, :, idx]
    # Correct the power
    cdata['p'][:15] *= antgainf[:, :, :, idx]
    # Correct the autocorrelation
    cdata['a'][:15, :2] *= antgainf[:, :, :, idx]
    # Cross-hand autocorrelations use the geometric mean of the two pol gains
    cross_fac = np.sqrt(antgainf[:, 0] * antgainf[:, 1])
    cdata['a'][:15, 2] *= cross_fac[:, :, idx]
    cdata['a'][:15, 3] *= cross_fac[:, :, idx]
    # Correct the power-squared -- this should preserve SK
    cdata['p2'][:15] *= antgainf[:, :, :, idx]**2
    # Remove any uncorrected times before returning
    #cdata['time'] = cdata['time'][idx1]
    #cdata['p'] = cdata['p'][:,:,:,idx1]
    #cdata['a'] = cdata['a'][:,:,:,idx1]
    #cdata['p2'] = cdata['p2'][:,:,:,idx1]
    #cdata['ha'] = cdata['ha'][idx1]
    #cdata['m'] = cdata['m'][:,:,:,idx1]
    return cdata
def get_fem_level(trange, dt=None): ''' Get FEM attenuation levels for a given timerange. Returns a dictionary with keys as follows: times: A Time object containing the array of times, size (nt) hlev: The FEM attenuation level for HPol, size (nt, 15) vlev: The FEM attenuation level for VPol, size (nt, 15) dcmattn: The base DCM attenuations for 34 bands x 15 antennas x 2 Poln, size (34,30) The order is Ant1 H, Ant1 V, Ant2 H, Ant2 V, etc. dcmoff: If DPPoffset-on is 0, this is None (meaning there are no changes to the above base attenuations). If DPPoffset-on is 1, then dcmoff is a table of offsets to the base attenuation, size (nt, 50). The offset applies to all antennas/polarizations. Optional keywords: dt Seconds between entries to read from SQL stateframe database. If omitted, 1 s is assumed. ''' if dt is None: tstart, tend = [str(i) for i in trange.lv] else: # Expand time by 1/2 of dt before and after tstart = str(np.round(trange[0].lv - dt / 2)) tend = str(np.round(trange[1].lv + dt / 2)) cursor = db.get_cursor() ver = db.find_table_version(cursor, trange[0].lv) # Get front end attenuator states query = 'select Timestamp,Ante_Fron_FEM_Clockms,' \ +'Ante_Fron_FEM_HPol_Regi_Level,Ante_Fron_FEM_VPol_Regi_Level from fV' \ +ver+'_vD15 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp' data, msg = db.do_query(cursor, query) if msg == 'Success': if dt: # If we want other than full cadence, get new array shapes and times n = len(data['Timestamp']) # Original number of times new_n = ( n / 15 / dt ) * 15 * dt # Truncated number of times equally divisible by dt new_shape = (n / 15 / dt, dt, 15) # New shape of truncated arrays times = Time(data['Timestamp'][:new_n].astype('int')[::15 * dt], format='lv') else: times = Time(data['Timestamp'].astype('int')[::15], format='lv') hlev = data['Ante_Fron_FEM_HPol_Regi_Level'] vlev = data['Ante_Fron_FEM_VPol_Regi_Level'] ms = data['Ante_Fron_FEM_Clockms'] nt = len(hlev) / 15 hlev.shape = (nt, 15) vlev.shape 
= (nt, 15) ms.shape = (nt, 15) # Find any entries for which Clockms is zero, which indicates where no # gain-state measurement is available. for i in range(15): bad, = np.where(ms[:, i] == 0) if bad.size != 0 and bad.size != nt: # Find nearest adjacent good value good, = np.where(ms[:, i] != 0) idx = nearest_val_idx(bad, good) hlev[bad, i] = hlev[good[idx], i] vlev[bad, i] = vlev[good[idx], i] if dt: # If we want other than full cadence, find mean over dt measurements hlev = np.mean(hlev[:new_n / 15].reshape(new_shape), 1) vlev = np.mean(vlev[:new_n / 15].reshape(new_shape), 1) # Put results in canonical order [nant, nt] hlev = hlev.T vlev = vlev.T else: print 'Error reading FEM levels:', msg return {} # Get back end attenuator states xml, buf = ch.read_cal(2, t=trange[0]) dcmattn = stf.extract(buf, xml['Attenuation']) dcmattn.shape = (34, 15, 2) # Put into canonical order [nant, npol, nband] dcmattn = np.moveaxis(dcmattn, 0, 2) # See if DPP offset is enabled query = 'select Timestamp,DPPoffsetattn_on from fV' \ +ver+'_vD1 where Timestamp >= '+tstart+' and Timestamp <= '+tend+'order by Timestamp' data, msg = db.do_query(cursor, query) if msg == 'Success': dppon = data['DPPoffsetattn_on'] if np.where(dppon > 0)[0].size == 0: dcm_off = None else: query = 'select Timestamp,DCMoffset_attn from fV' \ +ver+'_vD50 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp' data, msg = db.do_query(cursor, query) if msg == 'Success': otimes = Time(data['Timestamp'].astype('int')[::15], format='lv') dcmoff = data['DCMoffset_attn'] dcmoff.shape = (nt, 50) # We now have a time-history of offsets, at least some of which are non-zero. # Offsets by slot number do us no good, so we need to translate to band number. # Get fseqfile name at mean of timerange, from stateframe SQL database fseqfile = get_fseqfile( Time(int(np.mean(trange.lv)), format='lv')) if fseqfile is None: print 'Error: No active fseq file.' 
dcm_off = None else: # Get fseqfile from ACC and return bandlist bandlist = fseqfile2bandlist(fseqfile) # Use bandlist to covert nt x 50 array to nt x 34 band array of DCM attn offsets # Note that this assumes DCM offset is the same for any multiply-sampled bands # in the sequence. dcm_off = np.zeros((nt, 34), float) dcm_off[:, bandlist - 1] = dcmoff # Put into canonical order [nband, nt] dcm_off = dcm_off.T if dt: # If we want other than full cadence, find mean over dt measurements new_nt = len(times) dcm_off = dcm_off[:, :new_nt * dt] dcm_off.shape = (34, dt, new_nt) dcm_off = np.mean(dcm_off, 1) else: print 'Error reading DCM attenuations:', msg dcm_off = None else: print 'Error reading DPPon state:', msg dcm_off = None cursor.close() return { 'times': times, 'hlev': hlev.astype(int), 'vlev': vlev.astype(int), 'dcmattn': dcmattn, 'dcmoff': dcm_off }
def use_date(self, event):
    ''' When user has selected a date (via <Return> in date box) this function
        verifies that the date is good, and if so, first checks for calibration
        results in the SQL database and lists dates and times of phase calibration
        observations in the scan box.  For those scans with SQL calibrations,
        a notation '*' is made on the line.
    '''
    refcal_type = 8
    phacal_type = 9
    w = event.widget
    self.scan_selected = None
    self.ref_selected = None
    try:
        mjd = Time(w.get()).mjd
    except:
        self.pc_scanbox.delete(0, Tk.END)
        # BUGFIX: message previously read 'YYYY-MM=DD'; the expected input
        # format (parsed by Time() above) is ISO 'YYYY-MM-DD'.
        self.pc_scanbox.insert(Tk.END, 'Error: Invalid Date. Must be YYYY-MM-DD')
        return
    # Scan window: 06:00 UT on the chosen day to 06:00 UT the next day
    trange = Time([mjd+0.25,mjd+1.25],format='mjd')
    self.scan_dict = findscans(trange)
    sd = self.scan_dict
    self.pc_scanbox.delete(0, Tk.END)
    if sd['msg'] != 'Success':
        self.pc_scanbox.insert(Tk.END, sd['msg'])
        return
    # NOTE(review): the column spacing in these header strings (and in the
    # line.replace() padding below) appears collapsed in this copy of the
    # source -- verify field widths against the original layout.
    self.pc_scanbox.insert(Tk.END, 'Time SQL Time Source Duration [*]')
    self.pc_scanbox.insert(Tk.END, '-------- -------- -------- -------- ---')
    self.pc_dictlist = []
    self.saved = []
    for i in range(len(sd['Timestamp'])):
        st_time = Time(sd['Timestamp'][i],format='lv')
        en_time = Time(sd['Timestamp'][i]+sd['duration'][i]*60.,format='lv')
        line = st_time.iso[11:19] + ' ' + sd['SourceID'][i] + '{:6.1f} m '.format(sd['duration'][i])
        # This scan is not a REFCAL unless proven otherwise
        not_a_refcal = True
        # This scan is not a PHACAL unless proven otherwise
        not_a_phacal = True
        # See if results exist in SQL database
        try:
            xml, buf = ch.read_cal(refcal_type, t=en_time)
            #refcal_time = Time(extract(buf,xml['Timestamp']),format='lv')  # Mid-time of data
            refcal_time = Time((extract(buf,xml['T_beg'])+extract(buf,xml['T_end']))/2,format='lv')  # Mid-time of data
            dtr1 = st_time - refcal_time   # negative if in scan
            dtr2 = en_time - refcal_time   # positive if in scan
            if dtr1.jd < 0 and dtr2.jd > 0:
                # REFCAL mid-time falls inside this scan
                line += ' R'
                SQL_time = Time(extract(buf,xml['SQL_timestamp']),format='lv').iso[10:19]
                line = line.replace(' ',SQL_time+' ')
                x = extract(buf,xml['Refcal_Real']) + 1j*extract(buf,xml['Refcal_Imag'])
                sigma = extract(buf,xml['Refcal_Sigma'])
                flags = extract(buf,xml['Refcal_Flag'])
                fghz = extract(buf,xml['Fghz'])
                self.pc_dictlist.append({'refcal_time':refcal_time, 'fghz':fghz, 'sigma':sigma, 'x':x, 'flags':flags})
                self.saved.append(True)
                not_a_refcal = False
        except:
            pass
        if not_a_refcal:
            try:
                xml, buf = ch.read_cal(phacal_type, t=en_time)
                phacal_time = Time(extract(buf,xml['Timestamp']),format='lv')  # Mid-time of data
                dtp1 = st_time - phacal_time
                dtp2 = en_time - phacal_time
                if dtp1.jd < 0 and dtp2.jd > 0:
                    # PHACAL mid-time falls inside this scan
                    line += ' P'
                    SQL_time = Time(extract(buf,xml['SQL_timestamp']),format='lv').iso[10:19]
                    line = line.replace(' ',SQL_time+' ')
                    x = extract(buf,xml['Phacal_Amp'])*np.exp(1j*extract(buf,xml['Phacal_Pha']))
                    sigma = extract(buf,xml['Phacal_Sigma'])
                    flags = extract(buf,xml['Phacal_Flag'])
                    fghz = extract(buf,xml['Fghz'])
                    mbd = extract(buf,xml['MBD'])
                    mbd_flag = extract(buf,xml['Flag'])
                    self.pc_dictlist.append({'fghz':fghz, 'sigma':sigma, 'x':x, 'flags':flags,
                                             'mbd':mbd[:,:,1], 'offsets':mbd[:,:,0], 'mbd_flag':mbd_flag})
                    self.saved.append(True)
                    not_a_phacal = False
            except:
                pass
        if not_a_refcal and not_a_phacal:
            # Neither refcal nor phacal exists for this time, so set empty dictionary
            self.pc_dictlist.append({})
            self.saved.append(False)
        nscans = len(self.pc_dictlist)
        self.pc_scanbox.insert(Tk.END, line)
def get_gain_state(trange, dt=None, relax=False): ''' Get all gain-state information for a given timerange. Returns a dictionary with keys as follows: times: A Time object containing the array of times, size (nt) h1: The first HPol attenuator value for 15 antennas, size (nt, 15) v1: The first VPol attenuator value for 15 antennas, size (nt, 15) h2: The second HPol attenuator value for 15 antennas, size (nt, 15) v2: The second VPol attenuator value for 15 antennas, size (nt, 15) dcmattn: The base DCM attenuations for nbands x 15 antennas x 2 Poln, size (34 or 52,30) The order is Ant1 H, Ant1 V, Ant2 H, Ant2 V, etc. dcmoff: If DPPoffset-on is 0, this is None (meaning there are no changes to the above base attenuations). If DPPoffset-on is 1, then dcmoff is a table of offsets to the base attenuation, size (nt, 50). The offset applies to all antennas/polarizations. Optional keywords: dt Seconds between entries to read from SQL stateframe database. If omitted, 1 s is assumed. relax Used for gain of reference time, in case there are no SQL data for the requested time. In that case it finds the data for the nearest later time. 
''' if dt is None: tstart, tend = [str(i) for i in trange.lv] else: # Expand time by 1/2 of dt before and after tstart = str(np.round(trange[0].lv - dt / 2)) tend = str(np.round(trange[1].lv + dt / 2)) cursor = db.get_cursor() ver = db.find_table_version(cursor, trange[0].lv) # Get front end attenuator states # Attempt to solve the problem if there are no data if relax: # Special case of reference gain, where we want the first nt records after tstart, in case there # are no data at time tstart nt = int(float(tend) - float(tstart) - 1) * 15 query = 'select top '+str(nt)+' Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second,' \ +'Ante_Fron_FEM_VPol_Atte_First,Ante_Fron_FEM_VPol_Atte_Second,Ante_Fron_FEM_Clockms from fV' \ +ver+'_vD15 where Timestamp >= '+tstart+' order by Timestamp' else: query = 'select Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second,' \ +'Ante_Fron_FEM_VPol_Atte_First,Ante_Fron_FEM_VPol_Atte_Second,Ante_Fron_FEM_Clockms from fV' \ +ver+'_vD15 where Timestamp >= '+tstart+' and Timestamp < '+tend+' order by Timestamp' #if dt: # # If dt (seconds between measurements) is set, add appropriate SQL statement to query # query += ' and (cast(Timestamp as bigint) % '+str(dt)+') = 0 ' data, msg = db.do_query(cursor, query) if msg == 'Success': if dt: # If we want other than full cadence, get new array shapes and times n = len(data['Timestamp']) # Original number of times new_n = ( n / 15 / dt ) * 15 * dt # Truncated number of times equally divisible by dt new_shape = (n / 15 / dt, dt, 15) # New shape of truncated arrays times = Time(data['Timestamp'][:new_n].astype('int')[::15 * dt], format='lv') else: times = Time(data['Timestamp'].astype('int')[::15], format='lv') # Change tstart and tend to correspond to actual times from SQL tstart, tend = [str(i) for i in times[[0, -1]].lv] h1 = data['Ante_Fron_FEM_HPol_Atte_First'] h2 = data['Ante_Fron_FEM_HPol_Atte_Second'] v1 = data['Ante_Fron_FEM_VPol_Atte_First'] v2 = 
data['Ante_Fron_FEM_VPol_Atte_Second'] ms = data['Ante_Fron_FEM_Clockms'] nt = len(h1) / 15 h1.shape = (nt, 15) h2.shape = (nt, 15) v1.shape = (nt, 15) v2.shape = (nt, 15) ms.shape = (nt, 15) # Find any entries for which Clockms is zero, which indicates where no # gain-state measurement is available. for i in range(15): bad, = np.where(ms[:, i] == 0) if bad.size != 0 and bad.size != nt: # Find nearest adjacent good value good, = np.where(ms[:, i] != 0) idx = nearest_val_idx(bad, good) h1[bad, i] = h1[good[idx], i] h2[bad, i] = h2[good[idx], i] v1[bad, i] = v1[good[idx], i] v2[bad, i] = v2[good[idx], i] if dt: # If we want other than full cadence, find mean over dt measurements h1 = np.mean(h1[:new_n / 15].reshape(new_shape), 1) h2 = np.mean(h2[:new_n / 15].reshape(new_shape), 1) v1 = np.mean(v1[:new_n / 15].reshape(new_shape), 1) v2 = np.mean(v2[:new_n / 15].reshape(new_shape), 1) # Put results in canonical order [nant, nt] h1 = h1.T h2 = h2.T v1 = v1.T v2 = v2.T else: print 'Error reading FEM attenuations:', msg return {} # Get back end attenuator states xml, buf = ch.read_cal(2, t=trange[0]) dcmattn = stf.extract(buf, xml['Attenuation']) nbands = dcmattn.shape[0] dcmattn.shape = (nbands, 15, 2) # Put into canonical order [nant, npol, nband] dcmattn = np.moveaxis(dcmattn, 0, 2) # See if DPP offset is enabled query = 'select Timestamp,DPPoffsetattn_on from fV' \ +ver+'_vD1 where Timestamp >= '+tstart+' and Timestamp <= '+tend+'order by Timestamp' data, msg = db.do_query(cursor, query) if msg == 'Success': dppon = data['DPPoffsetattn_on'] if np.where(dppon > 0)[0].size == 0: dcm_off = None else: query = 'select Timestamp,DCMoffset_attn from fV' \ +ver+'_vD50 where Timestamp >= '+tstart+' and Timestamp <= '+tend #if dt: # # If dt (seconds between measurements) is set, add appropriate SQL statement to query # query += ' and (cast(Timestamp as bigint) % '+str(dt)+') = 0 ' query += ' order by Timestamp' data, msg = db.do_query(cursor, query) if msg == 'Success': otimes 
= Time(data['Timestamp'].astype('int')[::15], format='lv') dcmoff = data['DCMoffset_attn'] dcmoff.shape = (nt, 50) # We now have a time-history of offsets, at least some of which are non-zero. # Offsets by slot number do us no good, so we need to translate to band number. # Get fseqfile name at mean of timerange, from stateframe SQL database fseqfile = get_fseqfile( Time(int(np.mean(trange.lv)), format='lv')) if fseqfile is None: print 'Error: No active fseq file.' dcm_off = None else: # Get fseqfile from ACC and return bandlist bandlist = fseqfile2bandlist(fseqfile) nbands = len(bandlist) # Use bandlist to covert nt x 50 array to nt x nbands array of DCM attn offsets # Note that this assumes DCM offset is the same for any multiply-sampled bands # in the sequence. dcm_off = np.zeros((nt, nbands), float) dcm_off[:, bandlist - 1] = dcmoff # Put into canonical order [nband, nt] dcm_off = dcm_off.T if dt: # If we want other than full cadence, find mean over dt measurements new_nt = len(times) dcm_off = dcm_off[:, :new_nt * dt] dcm_off.shape = (nbands, dt, new_nt) dcm_off = np.mean(dcm_off, 1) else: print 'Error reading DCM attenuations:', msg dcm_off = None else: print 'Error reading DPPon state:', msg dcm_off = None cursor.close() return { 'times': times, 'h1': h1, 'v1': v1, 'h2': h2, 'v2': v2, 'dcmattn': dcmattn, 'dcmoff': dcm_off }
def unrot(data, azeldict=None):
    ''' Apply the correction to differential feed rotation to data, and return
        the corrected data.

        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          azeldict The dictionary returned from get_sql_info(), or if None, the appropriate
                     get_sql_info() call is done internally.

        Output:
          cdata    A dictionary with the phase-corrected data.  Only the key
                     x is updated.
    '''
    import copy
    from util import lobe
    trange = Time(data['time'][[0, -1]], format='jd')
    if azeldict is None:
        azeldict = get_sql_info(trange)
    chi = azeldict['ParallacticAngle']  # (nt, nant)
    # Correct parallactic angle for equatorial mounts, relative to Ant14
    for i in [8, 9, 10, 12, 13]:
        chi[:, i] -= chi[:, 13]
    # Ensure that nearest valid parallactic angle is used for times in the data
    good, = np.where(azeldict['ActualAzimuth'][0] != 0)
    tidx = nearest_val_idx(data['time'], azeldict['Time'][good].jd)
    # Read X-Y Delay phase from SQL database and get common frequencies
    xml, buf = ch.read_cal(11, t=trange[0])
    fghz = stateframe.extract(buf, xml['FGHz'])
    good, = np.where(fghz != 0.)
    fghz = fghz[good]
    dph = stateframe.extract(buf, xml['XYphase'])
    dph = dph[:, good]
    fidx1, fidx2 = common_val_idx(data['fghz'], fghz, precision=4)
    # Frequencies in the data with no X-Y phase measurement get flagged below
    missing = np.setdiff1d(np.arange(len(data['fghz'])), fidx1)
    nf, nbl, npol, nt = data['x'].shape
    nf = len(fidx1)
    # Correct data for X-Y delay phase
    for k, bl in enumerate(get_bl_order()):
        i, j = bl
        if i < 14 and j < 14 and i != j:
            a1 = lobe(dph[i, fidx2] - dph[j, fidx2])
            a2 = -dph[j, fidx2] + np.pi / 2
            a3 = dph[i, fidx2] - np.pi / 2
            data['x'][fidx1, k, 1] *= np.repeat(np.exp(1j * a1),
                                                nt).reshape(nf, nt)
            data['x'][fidx1, k, 2] *= np.repeat(np.exp(1j * a2),
                                                nt).reshape(nf, nt)
            data['x'][fidx1, k, 3] *= np.repeat(np.exp(1j * a3),
                                                nt).reshape(nf, nt)
    # Correct data for differential feed rotation
    cdata = copy.deepcopy(data)
    for n in range(nt):
        for k, bl in enumerate(get_bl_order()):
            i, j = bl
            if i < 14 and j < 14 and i != j:
                dchi = chi[n, i] - chi[n, j]
                cchi = np.cos(dchi)
                schi = np.sin(dchi)
                cdata['x'][:, k, 0, n] = data['x'][:, k, 0, n] * cchi + data[
                    'x'][:, k, 3, n] * schi
                cdata['x'][:, k, 2, n] = data['x'][:, k, 2, n] * cchi + data[
                    'x'][:, k, 1, n] * schi
                cdata['x'][:, k, 3, n] = data['x'][:, k, 3, n] * cchi - data[
                    'x'][:, k, 0, n] * schi
                cdata['x'][:, k, 1, n] = data['x'][:, k, 1, n] * cchi - data[
                    'x'][:, k, 2, n] * schi
    # Set flags for any missing frequencies (hopefully this also works when missing is np.array([]))
    # BUGFIX: was cdata[missing] = np.ma.masked, which indexes the *dict*
    # cdata with an (unhashable) ndarray and raises TypeError.  The intent,
    # as in the companion unrot() implementation, is to flag the missing
    # frequency channels of the cross-correlation array cdata['x'].
    cdata['x'][missing] = np.ma.masked
    return cdata
def DCM_master_attn_cal(update=False):
    ''' New version of this command, which uses the power values in the
        10gbe packet headers instead of the very slow measurement of the
        ADC levels themselves.  This version only takes about 8 s!

        If update is True, it writes the results to the SQL database.

        Returns the DCM_master_table in the form of lines of text
        strings, with labels (handy for viewing).
    '''
    # Packet powers, indexed as (slot, board id, power value)
    pwr = np.zeros((50,8,4),'int')
    # Capture on eth2 interface
    command = 'tcpdump -i eth2 -c 155000 -w /home/user/Python/dcm2.pcap -s 1000'
    p.sendcmd(command)
    # Capture on eth3 interface
    command = 'tcpdump -i eth3 -c 155000 -w /home/user/Python/dcm3.pcap -s 1000'
    p.sendcmd(command)
    headers = p.list_header('/home/user/Python/dcm2.pcap')
    for line in headers:
        try:
            # Fields 0,3,6,7,8,9 are slot, board id, and the 4 power values
            j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
            pwr[j,id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    headers = p.list_header('/home/user/Python/dcm3.pcap')
    for line in headers:
        try:
            j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
            pwr[j,id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    # Reshape to (slot, nant, npol)
    pwr.shape = (50,16,2)
    # Read current frequency sequence from database
    cursor = db.get_cursor()
    query = 'select top 50 FSeqList from hV37_vD50 order by Timestamp desc'
    fseq, msg = db.do_query(cursor, query)
    if msg == 'Success':
        fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
        # Convert frequency-sequence values to integer band numbers
        bandlist = ((np.array(fseqlist)-0.44)*2).astype(int)
    cursor.close()
    # Read current DCM_master_table from database
    xml, buf = ch.read_cal(2)
    orig_table = stf.extract(buf,xml['Attenuation'])
    # Order pwr values according to bandlist, taking median of any repeated values
    new_pwr = np.zeros((34,16,2))
    for i in range(34):
        idx, = np.where(bandlist-1 == i)
        if len(idx) > 0:
            new_pwr[i] = np.median(pwr[idx],0)
    new_pwr.shape = (34,32)
    # Now determine the change in attenuation needed to achieve a target
    # value of 1600.  Eliminate last two entries, corresponding to Ant16
    attn = np.log10(new_pwr[:,:-2]/1600.)*10.
    # Quantize resulting attenuations to the even-dB steps of the DCM (0-30 dB)
    new_table = (np.clip(orig_table + attn,0,30)/2).astype(int)*2
    DCMlines = []
    DCMlines.append('# Ant1 Ant2 Ant3 Ant4 Ant5 Ant6 Ant7 Ant8 Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15')
    DCMlines.append('# X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y X Y')
    DCMlines.append('# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----')
    for band in range(1,35):
        DCMlines.append('{:2} : {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'.format(band,*new_table[band-1]))
    if update:
        msg = ch.dcm_master_table2sql(DCMlines)
        if msg:
            print 'Success'
        else:
            print 'Error writing table to SQL database!'
    return DCMlines
def unrot(data, azeldict=None):
    ''' Apply the correction to differential feed rotation to data, and return
        the corrected data.  This also applies flags to data whose antennas are
        not tracking.

        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          azeldict The dictionary returned from get_sql_info(), or if None, the appropriate
                     get_sql_info() call is done internally.

        Output:
          cdata    A dictionary with the phase-corrected data.  Only the key
                     x is updated.
    '''
    import copy
    from util import lobe, bl2ord
    trange = Time(data['time'][[0, -1]], format='jd')
    if azeldict is None:
        azeldict = get_sql_info(trange)
    # Convert parallactic angle from degrees to radians
    chi = azeldict['ParallacticAngle'] * np.pi / 180.  # (nt, nant)
    # Correct parallactic angle for equatorial mounts, relative to Ant14
    chi[:, [8, 9, 10, 12,
            13]] = 0  # Currently 0, but can be measured and updated
    # Which antennas are tracking
    track = azeldict['TrackFlag']  # True if tracking
    # Ensure that nearest valid parallactic angle is used for times in the data
    good = np.where(azeldict['ActualAzimuth'] != 0)
    tidx = []  # List of arrays of indexes for each antenna
    for i in range(14):
        gd = good[0][np.where(good[1] == i)]
        tidx.append(nearest_val_idx(data['time'], azeldict['Time'][gd].jd))
    # Read X-Y Delay phase from SQL database and get common frequencies
    xml, buf = ch.read_cal(11, t=trange[0])
    fghz = stateframe.extract(buf, xml['FGHz'])
    good, = np.where(fghz != 0.)
    fghz = fghz[good]
    dph = stateframe.extract(buf, xml['XYphase'])
    dph = dph[:, good]
    xi_rot = stateframe.extract(buf, xml['Xi_Rot'])
    xi_rot = xi_rot[good]
    fidx1, fidx2 = common_val_idx(data['fghz'], fghz, precision=4)
    # Frequencies in the data with no X-Y phase measurement get flagged below
    missing = np.setdiff1d(np.arange(len(data['fghz'])), fidx1)
    nf, nbl, npol, nt = data['x'].shape
    nf = len(fidx1)
    # Correct data for X-Y delay phase
    for i in range(13):
        for j in range(i + 1, 14):
            k = bl2ord[i, j]
            a1 = lobe(dph[i, fidx2] - dph[j, fidx2])
            a2 = -dph[j, fidx2] - xi_rot[fidx2]
            a3 = dph[i, fidx2] - xi_rot[fidx2] + np.pi
            data['x'][fidx1, k, 1] *= np.repeat(np.exp(1j * a1),
                                                nt).reshape(nf, nt)
            data['x'][fidx1, k, 2] *= np.repeat(np.exp(1j * a2),
                                                nt).reshape(nf, nt)
            data['x'][fidx1, k, 3] *= np.repeat(np.exp(1j * a3),
                                                nt).reshape(nf, nt)
    # Correct data for differential feed rotation
    cdata = copy.deepcopy(data)
    for n in range(nt):
        for i in range(13):
            for j in range(i + 1, 14):
                k = bl2ord[i, j]
                # Nearest valid parallactic-angle time index for each antenna
                ti = tidx[i][n]
                tj = tidx[j][n]
                if track[ti, i] and track[tj, j]:
                    # Rotate polarization products by the differential angle
                    dchi = chi[ti, i] - chi[tj, j]
                    cchi = np.cos(dchi)
                    schi = np.sin(dchi)
                    cdata['x'][:, k, 0,
                               n] = data['x'][:, k, 0,
                                              n] * cchi + data['x'][:, k, 3,
                                                                    n] * schi
                    cdata['x'][:, k, 2,
                               n] = data['x'][:, k, 2,
                                              n] * cchi + data['x'][:, k, 1,
                                                                    n] * schi
                    cdata['x'][:, k, 3,
                               n] = data['x'][:, k, 3,
                                              n] * cchi - data['x'][:, k, 0,
                                                                    n] * schi
                    cdata['x'][:, k, 1,
                               n] = data['x'][:, k, 1,
                                              n] * cchi - data['x'][:, k, 2,
                                                                    n] * schi
                else:
                    # One or both antennas not tracking -- flag this baseline/time
                    cdata['x'][:, k, :, n] = np.ma.masked
    # Set flags for any missing frequencies (hopefully this also works when "missing" is np.array([]))
    cdata['x'][missing] = np.ma.masked
    return cdata
def gain_state(trange=None):
    ''' Read and assemble the gain state for the given timerange from the SQL
        database, or for the last 10 minutes if trange is None.

        Returns the complex attenuation of the FEM for the timerange as an array
        of size (nant, npol, ntimes) [not band dependent], and the complex
        attenuation of the DCM for the same timerange as an array of size
        (nant, npol, nbands, ntimes).  Also returns the time as a Julian-date
        array.  Attenuations are in dB units (complex).

        Raises RuntimeError if the frequency-sequence query fails.  (Previously
        a failed query left bandlist undefined, causing a NameError later.)
    '''
    from util import Time
    import dbutil as db
    from fem_attn_calib import fem_attn_update
    import cal_header as ch
    if trange is None:
        # Default to the last 10 minutes
        t = Time.now()
        t2 = Time(t.jd - 600. / 86400., format='jd')
        trange = Time([t2.iso, t.iso])
    ts = trange[0].lv  # Start timestamp
    cursor = db.get_cursor()
    # First get FEM attenuation for timerange
    D15dict = db.get_dbrecs(cursor, dimension=15, timestamp=trange)
    DCMoffdict = db.get_dbrecs(cursor, dimension=50, timestamp=trange)
    DCMoff_v_slot = DCMoffdict['DCMoffset_attn']
    # DCMoff_0 = D15dict['DCM_Offset_Attn'][:,0]   # All ants are the same
    fem_attn = {}
    fem_attn['timestamp'] = D15dict['Timestamp'][:, 0]
    nt = len(fem_attn['timestamp'])
    junk = np.zeros([nt, 1], dtype='int')  # add the non-existing antenna 16
    fem_attn['h1'] = np.append(D15dict['Ante_Fron_FEM_HPol_Atte_First'], junk, axis=1)   # FEM hpol first attn value
    fem_attn['h2'] = np.append(D15dict['Ante_Fron_FEM_HPol_Atte_Second'], junk, axis=1)  # FEM hpol second attn value
    fem_attn['v1'] = np.append(D15dict['Ante_Fron_FEM_VPol_Atte_First'], junk, axis=1)   # FEM vpol first attn value
    fem_attn['v2'] = np.append(D15dict['Ante_Fron_FEM_VPol_Atte_Second'], junk, axis=1)  # FEM vpol second attn value
    fem_attn['ants'] = np.append(D15dict['I15'][0, :], [15])
    # Add corrections from SQL database for start time of timerange
    fem_attn_corr = fem_attn_update(fem_attn, trange[0])
    # Next get DCM attenuation for timerange
    # Getting next earlier scan header
    ver = db.find_table_version(cursor, ts, True)
    query = 'select top 50 Timestamp,FSeqList from hV' + ver + '_vD50 where Timestamp <= ' + str(ts) + ' order by Timestamp desc'
    fseq, msg = db.do_query(cursor, query)
    cursor.close()
    if msg != 'Success':
        # Fail loudly: bandlist would otherwise be undefined in the loops below
        raise RuntimeError('Error reading frequency sequence from SQL database: ' + str(msg))
    fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
    bandlist = ((np.array(fseqlist) - 0.44) * 2).astype(int)
    # Read current DCM_table from database
    xml, buf = ch.read_cal(3, trange[0])
    orig_table = stf.extract(buf, xml['Attenuation']).astype('int')
    orig_table.shape = (50, 15, 2)
    xml, buf = ch.read_cal(6, trange[0])
    dcm_attn_bitv = np.nan_to_num(stf.extract(buf, xml['DCM_Attn_Real'])) + np.nan_to_num(stf.extract(buf, xml['DCM_Attn_Imag'])) * 1j
    # # Add one more bit (all zeros) to take care of unit bit
    # dcm_attn_bitv = np.concatenate((np.zeros((16,2,1),'int'),dcm_attn_bitv),axis=2)
    # We now have:
    #   orig_table     the original DCM at start of scan, size (nslot, nant=15, npol)
    #   DCMoff_0       the offset applied to all antennas and slots (ntimes)
    #   DCMoff_v_slot  the offset applied to all antennas but varies by slot (ntimes, nslot)
    #   dcm_attn_bitv  the measured (non-nominal) attenuations for each bit value (nant=16, npol, nbit) -- complex
    # Now convert slot to band, add appropriately, and organize as (nant=16, npol, nband, ntimes)
    # Add one more antenna (all zeros) to orig_table
    orig_table = np.concatenate((orig_table, np.zeros((50, 1, 2), 'int')), axis=1)
    ntimes, nslot = DCMoff_v_slot.shape
    dcm_attn = np.zeros((16, 2, 34, ntimes), int)
    for i in range(ntimes):
        for j in range(50):
            idx = bandlist[j] - 1
            # This adds attenuation for repeated bands--hopefully the same value for each repeat
            dcm_attn[:, :, idx, i] += orig_table[j, :, :] + DCMoff_v_slot[i, j]
    # Normalize repeated bands by finding number of repeats and dividing.
    for i in range(1, 35):
        n = len(np.where(bandlist == i)[0])
        if n > 1:
            dcm_attn[:, :, i - 1, :] /= n
    # Make sure attenuation is in range
    dcm_attn = np.clip(dcm_attn, 0, 30)
    # Finally, correct for non-nominal (measured) bit values
    # Start with 0 attenuation as reference
    dcm_attn_corr = dcm_attn * (0 + 0j)
    att = np.zeros((16, 2, 34, ntimes, 5), complex)
    # Calculate resulting attenuation based on bit attn values (2,4,8,16)
    for i in range(4):
        # Need dcm_attn_bitv[...,i] to be same shape as dcm_attn
        bigger_bitv = np.broadcast_to(dcm_attn_bitv[..., i], (ntimes, 34, 16, 2))
        bigger_bitv = np.swapaxes(np.swapaxes(np.swapaxes(bigger_bitv, 0, 3), 1, 2), 0, 1)
        att[..., i] = (np.bitwise_and(dcm_attn, 2**(i + 1)) >> (i + 1)) * bigger_bitv
        dcm_attn_corr = dcm_attn_corr + att[..., i]
    # Move ntimes column to next to last position, and then sum over last column (the two attenuators)
    fem_attn_corr = np.sum(np.rollaxis(fem_attn_corr, 0, 3), 3)
    # Output is FEM shape (nant, npol, ntimes) = (16, 2, ntimes)
    #          DCM shape (nant, npol, nband, ntimes) = (16, 2, 34, ntimes)
    # Arrays are complex, in dB units
    tjd = Time(fem_attn['timestamp'].astype('int'), format='lv').jd
    return fem_attn_corr, dcm_attn_corr, tjd
def xydelay_anal(npzfiles, fix_tau_lo=None): ''' Analyze a "standard" X vs. Y delay calibration, consisting of four observations on a strong calibrator near 0 HA, in the order: 90-degree Low-frequency receiver, 90-degree High-frequency receiver, 0-degree High-frequency receiver, 0-degree Low-frequency receiver It has happened that the low-frequency receiver delays were not set for one of the observation sets. This can be fixed by reading the delay-center table for the relevant date and applying a phase correction according to the delay difference. Setting fix_tau_lo to "first" means correct first scan, and to "last" means correct last scan. ''' import matplotlib.pylab as plt from util import common_val_idx npzfiles = np.array(npzfiles) out = [] for file in npzfiles: out.append(ri.read_npz([file])) out = np.array(out) if fix_tau_lo != None: # Correct for low-frequency delay error, if requested import cal_header as ch from stateframe import extract if fix_tau_lo == 'first': icorr=0 elif fix_tau_lo == 'last': icorr=3 else: print 'Invalid value for fix_tau_lo. 
Must be "first" or "last"' return xml, buf = ch.read_cal(4,t=Time(out[icorr]['time'][0],format='jd')) dlatbl = extract(buf,xml['Delaycen_ns']) dtau_x, dtau_y = dlatbl[14] - dlatbl[13] dp_x = out[icorr]['fghz']*2*np.pi*dtau_x dp_y = out[icorr]['fghz']*2*np.pi*dtau_y nt, = out[icorr]['time'].shape for i in range(nt): for iant in range(13): out[icorr]['x'][ri.bl2ord[iant,13],0,:,i] *= np.exp(1j*dp_x) out[icorr]['x'][ri.bl2ord[iant,13],1,:,i] *= np.exp(1j*dp_y) out[icorr]['x'][ri.bl2ord[iant,13],2,:,i] *= np.exp(1j*dp_y) out[icorr]['x'][ri.bl2ord[iant,13],3,:,i] *= np.exp(1j*dp_x) dph_lo = get_xy_corr(out[[3,0]], doplot=False) dph_hi = get_xy_corr(out[[2,1]]) fghz = np.union1d(dph_lo['fghz'],dph_hi['fghz']) # Check for LO and HI being off by pi due to pi-ambiguity in xi_rot lo_com, hi_com = common_val_idx(dph_lo['fghz'],dph_hi['fghz']) # Average xi_rot angle difference over common frequencies a = lobe(dph_hi['xi_rot'][hi_com]-dph_lo['xi_rot'][lo_com]) # angle difference xi_rot_diff = np.angle(np.sum(np.exp(1j*a))) # Average angle difference if np.abs(xi_rot_diff) > np.pi/2: # Looks like shifting by pi will get us closer, so shift both xyphase and xi_rot # This does not actually change any phase relationships, it only makes the plots # and HI/LO data comparison more consistent. 
dph_lo['xyphase'] += np.pi dph_lo['xi_rot'] += np.pi dph_lo['dphi'] += np.pi dph_lo['dph14'] += np.pi ax = plt.figure('XY_Phase').get_axes() for i in range(13): ax[i].plot(dph_lo['fghz'], lobe(dph_lo['dphi'][0,i]), '.',color='C0') ax[i].plot(dph_lo['fghz'], lobe(dph_lo['dphi'][1,i]), '.',color='C1') ax[i].plot(dph_lo['fghz'],lobe(dph_lo['xyphase'][i]),'r.') ax[i].set_xlim(0,20) for i in range(26): ax[13].plot(dph_lo['fghz'],lobe(dph_lo['dph14'][i]),'.') ax[13].plot(dph_lo['fghz'],lobe(dph_lo['xyphase'][13]),'r.') nf, = fghz.shape flo_uniq = np.setdiff1d(dph_lo['fghz'],dph_hi['fghz']) # List of frequencies in LO not in HI idx_lo_not_hi, idx2 = common_val_idx(fghz, flo_uniq) # List of indexes of unique LO frequencies # Make empty arrays with enough frequencies xyphase = np.zeros((14,nf),dtype=float) xi_rot = np.zeros((nf),dtype=float) idx_hi, idx2 = common_val_idx(fghz,dph_hi['fghz']) # List of indexes of HI receiver frequencies xyphase[:14,idx_hi] = dph_hi['xyphase'] # Insert all high-receiver xyphases xyphase[:14,idx_lo_not_hi] = dph_lo['xyphase'][:14,idx_lo_not_hi] # For unique low-receiver frequencies, insert LO xyphases xi_rot[idx_hi] = dph_hi['xi_rot'] # Insert all high-receiver xi_rot xi_rot[idx_lo_not_hi] = lobe(dph_lo['xi_rot'][idx_lo_not_hi]) # For unique low-receiver frequencies, insert LO xi_rot ax[14].plot(fghz,xi_rot) dph_hi.update({'xi_rot':xi_rot, 'xyphase':xyphase, 'fghz':fghz}) print 'Referring to the output of this routine as "xyphase,"' print 'run cal_header.xy_phasecal2sql(xyphase) to write the SQL record.' return dph_hi
def DCM_attn_anal(filename):
    ''' Analyze a DCMATTNTEST observation to determine the 2-, 4- and 8-bit
        attenuation values.  Input is a Miriad file.  Returns a complex array
        attn of size (nant, npol, nbits) = (16, 2, 4) representing the
        measured attenuation, in dB, for each bit (nominal values are used
        where no measurement exists).
    '''
    import read_idb as ri
    import dbutil as db
    import cal_header as ch
    import stateframe as stf
    import copy
    from util import Time
    import matplotlib.pylab as plt
    out = ri.read_idb([filename])
    ts = int(Time(out['time'][0], format='jd').lv + 0.5)
    te = int(Time(out['time'][-1], format='jd').lv + 0.5)
    query = 'select Timestamp,DCM_Offset_Attn from fV65_vD15 where Timestamp between ' + str(ts) + ' and ' + str(te) + ' order by Timestamp'
    cursor = db.get_cursor()
    data, msg = db.do_query(cursor, query)
    cursor.close()
    # Use // so the reshape count stays an integer under Python 3 semantics too
    dcm_offset = data['DCM_Offset_Attn'].reshape(len(data['DCM_Offset_Attn']) // 15, 15)
    dcm_offset = dcm_offset[:, 0]  # All antennas are the same
    t = Time(out['time'][0], format='jd')
    xml, buf = ch.read_cal(2, t)
    table = stf.extract(buf, xml['Attenuation'])
    bandlist = ((out['fghz'] - 0.5) * 2).astype(int)
    tbl = table[bandlist - 1]
    tbl.shape = (len(bandlist), 15, 2)
    tbl = np.swapaxes(np.swapaxes(tbl, 0, -1), 0, 1)
    tbl2 = np.broadcast_to(tbl, (out['time'].shape[0], 15, 2, 134))
    tbl = copy.copy(np.rollaxis(tbl2, 0, 4))  # Shape (nant,npol,nf,nt)
    pwr = out['p'][:15]  # Shape (nant,npol,nf,nt)
    # Add value of dcm_offset to table
    for i, offset in enumerate(dcm_offset):
        tbl[:, :, :, i] += offset
    # Clip to valid attenuations
    tbl = np.clip(tbl, 0, 30)
    # Isolate good times in various attn states
    goodm2, = np.where(dcm_offset == -2)
    goodm2 = goodm2[2:-3]
    good2, = np.where(dcm_offset == 2)
    good2 = good2[2:-3]
    good0, = np.where(dcm_offset[goodm2[-1]:good2[0]] == 0)
    good0 += goodm2[-1]
    good0 = good0[2:-3]
    good4, = np.where(dcm_offset == 4)
    good4 = good4[2:-3]
    good6, = np.where(dcm_offset == 6)
    good6 = good6[2:-3]
    goodbg = good6 + 30  # Assumes FEMATTN 15 follows good6 30 s later
    # Perform median over good times and create pwrmed with medians
    # The 5 indexes correspond to dcm_offsets -2, 0, 2, 4 and 6
    nant, npol, nf, nt = pwr.shape
    pwrmed = np.zeros((nant, npol, nf, 5))
    # Do not forget to subtract the background
    bg = np.median(pwr[:, :, :, goodbg], 3)
    pwrmed[:, :, :, 0] = np.median(pwr[:, :, :, goodm2], 3) - bg
    pwrmed[:, :, :, 1] = np.median(pwr[:, :, :, good0], 3) - bg
    pwrmed[:, :, :, 2] = np.median(pwr[:, :, :, good2], 3) - bg
    pwrmed[:, :, :, 3] = np.median(pwr[:, :, :, good4], 3) - bg
    pwrmed[:, :, :, 4] = np.median(pwr[:, :, :, good6], 3) - bg
    good = np.array([goodm2[0], good0[0], good2[0], good4[0], good6[0]])
    tbl = tbl[:, :, :, good]
    at2 = np.zeros((13, 2), float)
    at4 = np.zeros((13, 2), float)
    at8 = np.zeros((13, 2), float)
    f1, ax1 = plt.subplots(2, 13)
    f2, ax2 = plt.subplots(2, 13)
    f3, ax3 = plt.subplots(2, 13)
    for ant in range(13):
        for pol in range(2):
            # 2-bit attenuation: power ratio of states differing by 2 dB
            pts = []
            for i in range(4):
                for v in [0, 4, 8, 12, 16, 20, 24, 28]:
                    idx, = np.where(tbl[ant, pol, :, i] == v)
                    if len(idx) != 0:
                        good, = np.where((tbl[ant, pol, idx, i] + 2) == tbl[ant, pol, idx, i + 1])
                        if len(good) != 0:
                            pts.append(pwrmed[ant, pol, idx[good], i] / pwrmed[ant, pol, idx[good], i + 1])
            pts = np.concatenate(pts)
            ax1[pol, ant].plot(pts, '.')
            ax1[pol, ant].set_ylim(0, 2)
            at2[ant, pol] = np.log10(np.median(pts)) * 10.
            # 4-bit attenuation: power ratio of states differing by 4 dB
            pts = []
            for i in range(3):
                for v in [0, 2, 8, 10, 16, 18, 24, 26]:
                    idx, = np.where(tbl[ant, pol, :, i] == v)
                    if len(idx) != 0:
                        good, = np.where((tbl[ant, pol, idx, i] + 4) == tbl[ant, pol, idx, i + 2])
                        if len(good) != 0:
                            pts.append(pwrmed[ant, pol, idx[good], i] / pwrmed[ant, pol, idx[good], i + 2])
            pts = np.concatenate(pts)
            ax2[pol, ant].plot(pts, '.')
            ax2[pol, ant].set_ylim(0, 3)
            at4[ant, pol] = np.log10(np.median(pts)) * 10.
            # 8-bit attenuation: power ratio of states differing by 8 dB
            pts = []
            i = 0
            for v in [0, 2, 4, 6, 16, 18, 20, 22]:
                idx, = np.where(tbl[ant, pol, :, i] == v)
                if len(idx) != 0:
                    good, = np.where((tbl[ant, pol, idx, i] + 8) == tbl[ant, pol, idx, i + 4])
                    if len(good) != 0:
                        pts.append(pwrmed[ant, pol, idx[good], i] / pwrmed[ant, pol, idx[good], i + 4])
            try:
                pts = np.concatenate(pts)
            except ValueError:
                # np.concatenate raises ValueError on an empty list: no points for
                # this antenna/polarization, so set to nominal attn (~8 dB ratio)
                pts = [6.30957, 6.30957, 6.30957]
            ax3[pol, ant].plot(pts, '.')
            ax3[pol, ant].set_ylim(5, 8)
            at8[ant, pol] = np.log10(np.median(pts)) * 10.
    plt.show()
    # Generate output table, a complex array of size (nant,npol,nbits)
    attn = np.zeros((16, 2, 4), complex)
    # Set to nominal values, then overwrite with measured ones
    for i in range(16):
        for j in range(2):
            attn[i, j] = [2.0 + 0j, 4.0 + 0j, 8.0 + 0j, 16.0 + 0j]
    attn[:13, :, 0] = at2 + 0j
    attn[:13, :, 1] = at4 + 0j
    attn[:13, :, 2] = at8 + 0j
    return attn
def rd_refcal(file, quackint=120., navg=3):
    ''' Reads a single UDB file representing a calibrator scan, and averages
        over the bands in the file.

        Parameters
        ----------
        file : str
            Name of the UDB file to read.
        quackint : float
            Quack interval (s) passed to read_idb().
        navg : int
            Number of records to average, passed to read_idb().

        Returns a dict with keys 'file', 'source', 'vis' (15,4,34,nt complex,
        corrected for X-Y delay phase and feed rotation), 'bands', 'fghz',
        'times', 'ha', 'dec' and 'flag'.
    '''
    from read_idb import read_idb, bl2ord
    from copy import deepcopy
    import chan_util_bc as cu
    import dbutil as db

    out = read_idb([file], navg=navg, quackint=quackint)

    bds = np.unique(out['band'])
    nt = len(out['time'])
    vis = np.zeros((15, 4, 34, nt), dtype=complex)
    fghz = np.zeros(34)
    # average over channels within each band
    o = out['x'][bl2ord[13, :13]]
    for bd in bds:
        idx = np.where(out['band'] == bd)[0]
        fghz[bd - 1] = np.nanmean(out['fghz'][idx])
        vis[:13, :, bd - 1] = np.mean(o[:, :, idx], axis=2)
    # Need to apply unrot to correct for feed rotation, before returning
    xml, buf = ch.read_cal(11, Time(out['time'][0], format='jd'))
    dph = extract(buf, xml['XYphase'])
    xi_rot = extract(buf, xml['Xi_Rot'])
    freq = extract(buf, xml['FGHz'])
    freq = freq[np.where(freq != 0)]
    band = []
    for f in freq:
        band.append(cu.freq2bdname(f))
    bds, sidx = np.unique(band, return_index=True)
    eidx = np.append(sidx[1:], len(band))
    dxy = np.zeros((14, 34), dtype=float)
    xi = np.zeros(34, dtype=float)
    fghz = np.zeros(34)
    # average dph and xi_rot frequencies within each band, to convert to 34-band representation
    for b, bd in enumerate(bds):
        fghz[bd - 1] = np.nanmean(freq[sidx[b]:eidx[b]])
        xi[bd - 1] = np.nanmean(xi_rot[sidx[b]:eidx[b]])
        for a in range(14):
            # Vector average of phases within the band
            dxy[a, bd - 1] = np.angle(np.sum(np.exp(1j * dph[a, sidx[b]:eidx[b]])))
    # Read parallactic angles for this scan
    trange = Time(out['time'][[0, -1]], format='jd')
    times, chi = db.get_chi(trange)
    tchi = times.jd
    t = out['time']
    if len(t) > 0:
        vis2 = deepcopy(vis)
        idx = nearest_val_idx(t, tchi)
        pa = chi[idx]  # Parallactic angle for the times of this refcal.
        # Equatorial-mount antennas do not rotate
        pa[:, [8, 9, 10, 12]] = 0.0
        nt = len(idx)  # Number of times in this refcal
        # Apply X-Y delay phase correction
        for a in range(13):
            a1 = lobe(dxy[a] - dxy[13])
            a2 = -dxy[13] - xi
            a3 = dxy[a] - xi + np.pi
            for j in range(nt):
                vis2[a, 1, :, j] *= np.exp(1j * a1)
                vis2[a, 2, :, j] *= np.exp(1j * a2)
                vis2[a, 3, :, j] *= np.exp(1j * a3)
        # Unrotate for differential feed rotation
        for j in range(nt):
            for a in range(13):
                vis[a, 0, :, j] = vis2[a, 0, :, j] * np.cos(pa[j, a]) + vis2[a, 3, :, j] * np.sin(pa[j, a])
                vis[a, 2, :, j] = vis2[a, 2, :, j] * np.cos(pa[j, a]) + vis2[a, 1, :, j] * np.sin(pa[j, a])
                vis[a, 3, :, j] = vis2[a, 3, :, j] * np.cos(pa[j, a]) - vis2[a, 0, :, j] * np.sin(pa[j, a])
                vis[a, 1, :, j] = vis2[a, 1, :, j] * np.cos(pa[j, a]) - vis2[a, 2, :, j] * np.sin(pa[j, a])
    # *******
    return {'file': file, 'source': out['source'], 'vis': vis, 'bands': bds,
            'fghz': fghz, 'times': out['time'], 'ha': out['ha'],
            'dec': out['dec'], 'flag': np.zeros_like(vis, dtype=int)}