Example 1
def sql2rstn(t=None):
    """This function extracts the RSTN data from SQL with SQL_timestamp 
    0300 on the date supplied. If the values could be extracted then the
    data is returned in a list as follows:
    
    0 - timestamp: Astropy Time which is the date on which the data was
        collected. This should match dt
        
    1 - freq: A float32 numpy array containing the 9 frequencies in GHz
    
    2 - data: The flux data which is a 9x7 int16 numpy array.
    
    SQL_timestamp is also returned."""

    if t is None: t = Time.now()

    sqlt = Time(np.floor(t.mjd) + 0.125, format='mjd')
    xml, buf = ch.read_cal(12, sqlt)

    if buf is None: return None, None

    sqlt_read = Time(extract(buf, xml['SQL_timestamp']), format='lv')
    #if np.floor(sqlt.mjd) != np.floor(sqlt_read.mjd): return None, None

    data = []
    data.append(Time(extract(buf, xml['Timestamp']), format='lv'))
    data.append(extract(buf, xml['FGHz']))
    data.append(extract(buf, xml['Flux']))

    return data, sqlt_read
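A minimal usage sketch for sql2rstn(); the date is illustrative, and util.Time is assumed to be importable as in the other examples in this collection.

# Hypothetical usage of sql2rstn() -- not part of the routine above.
from util import Time

data, sqlt_read = sql2rstn(Time('2019-07-04'))
if data is None:
    print 'No RSTN record found for that date.'
else:
    tstamp, freq, flux = data    # Time object, 9 frequencies [GHz], 9x7 int16 flux table
    print 'RSTN record SQL time:', sqlt_read.iso
    print 'Frequencies [GHz]:', freq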
Example 2
def unrot_refcal(refcal_in):
    ''' Apply feed-rotation correction to data read with rd_refcal(), returning updated data in
        the same format for further processing.
    '''
    import dbutil as db
    import copy
    import chan_util_bc as cu
    import cal_header as ch
    from stateframe import extract
    refcal = copy.deepcopy(refcal_in)
    xml, buf = ch.read_cal(11, Time(refcal['times'][0][0], format='jd'))
    dph = extract(buf, xml['XYphase'])
    xi_rot = extract(buf, xml['Xi_Rot'])
    freq = extract(buf, xml['FGHz'])
    freq = freq[np.where(freq != 0)]
    band = []
    for f in freq:
        band.append(cu.freq2bdname(f))
    bds, sidx = np.unique(band, return_index=True)
    nbd = len(bds)
    eidx = np.append(sidx[1:], len(band))
    dxy = np.zeros((14, 34), dtype=np.float)
    xi = np.zeros(34, dtype=np.float)
    fghz = np.zeros(34)
    # average dph and xi_rot frequencies within each band, to convert to 34-band representation
    for b, bd in enumerate(bds):
        fghz[bd - 1] = np.nanmean(freq[sidx[b]:eidx[b]])
        xi[bd - 1] = np.nanmean(xi_rot[sidx[b]:eidx[b]])
        for a in range(14):
            dxy[a, bd - 1] = np.angle(np.sum(np.exp(1j * dph[a, sidx[b]:eidx[b]])))
    nscans = len(refcal['scanlist'])
    for i in range(nscans):
        # Read parallactic angles for this scan
        trange = Time([refcal['tstlist'][i].iso, refcal['tedlist'][i].iso])
        times, chi = db.get_chi(trange)
        tchi = times.jd
        t = refcal['times'][i]
        if len(t) > 0:
            vis = copy.deepcopy(refcal['vis'][i])
            idx = nearest_val_idx(t, tchi)
            pa = chi[idx]  # Parallactic angle for the times of this refcal.
            pa[:, [8, 9, 10, 12]] = 0.0
            nt = len(idx)  # Number of times in this refcal
            # Apply X-Y delay phase correction
            for a in range(13):
                a1 = lobe(dxy[a] - dxy[13])
                a2 = -dxy[13] - xi
                a3 = dxy[a] - xi + np.pi
                for j in range(nt):
                    vis[a, 1, :, j] *= np.exp(1j * a1)
                    vis[a, 2, :, j] *= np.exp(1j * a2)
                    vis[a, 3, :, j] *= np.exp(1j * a3)
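            # Apply the feed-rotation correction by mixing the polarization products
            # according to each antenna's parallactic angle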
            for j in range(nt):
                for a in range(13):
                    refcal['vis'][i][a, 0, :, j] = vis[a, 0, :, j] * np.cos(pa[j, a]) + vis[a, 3, :, j] * np.sin(pa[j, a])
                    refcal['vis'][i][a, 2, :, j] = vis[a, 2, :, j] * np.cos(pa[j, a]) + vis[a, 1, :, j] * np.sin(pa[j, a])
                    refcal['vis'][i][a, 3, :, j] = vis[a, 3, :, j] * np.cos(pa[j, a]) - vis[a, 0, :, j] * np.sin(pa[j, a])
                    refcal['vis'][i][a, 1, :, j] = vis[a, 1, :, j] * np.cos(pa[j, a]) - vis[a, 2, :, j] * np.sin(pa[j, a])
    return refcal
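A hedged usage sketch for unrot_refcal(); rd_refcal() is named in the docstring above, but its call signature is assumed here.

# Hypothetical usage of unrot_refcal() -- rd_refcal() signature is an assumption.
from util import Time

trange = Time(['2019-07-04 18:00', '2019-07-04 20:00'])
refcal_in = rd_refcal(trange)        # assumed reader returning the dict format described above
refcal = unrot_refcal(refcal_in)     # feed-rotation-corrected copy; the input is left untouched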
Example 3
def get_calfac(t=None):
    ''' Read total power and auto-correlation calibration factors from the SQL
        database, for the time specified by Time() object t, or if None, at the
        next earlier calibration time to the current time.
    '''
    tpcal_type = 10  # Calibration type specified in cal_header.py
    if t is None:
        t = Time.now()
    xml, buf = ch.read_cal(tpcal_type, t=t)
    fghz = stateframe.extract(buf, xml['FGHz'])
    nf = len(fghz)
    tpcalfac = np.zeros((13, 2, nf), np.float)
    tpoffsun = np.zeros((13, 2, nf), np.float)
    accalfac = np.zeros((13, 2, nf), np.float)
    acoffsun = np.zeros((13, 2, nf), np.float)
    nant = len(xml['Antenna'])
    for i in range(nant):
        iant = stateframe.extract(buf, xml['Antenna'][i]['Antnum']) - 1
        tpcalfac[iant] = stateframe.extract(buf, xml['Antenna'][i]['TPCalfac'])
        accalfac[iant] = stateframe.extract(buf, xml['Antenna'][i]['ACCalfac'])
        tpoffsun[iant] = stateframe.extract(buf, xml['Antenna'][i]['TPOffsun'])
        acoffsun[iant] = stateframe.extract(buf, xml['Antenna'][i]['ACOffsun'])
    try:
        sqltime = stateframe.extract(buf, xml['SQL_timestamp'])
    except:
        sqltime = None
    return {
        'fghz': fghz,
        'timestamp': stateframe.extract(buf, xml['Timestamp']),
        'sqltime': sqltime,
        'tpcalfac': tpcalfac,
        'accalfac': accalfac,
        'tpoffsun': tpoffsun,
        'acoffsun': acoffsun
    }
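A short usage sketch for get_calfac(); the date is illustrative, and the LabVIEW timestamp is converted with util.Time as elsewhere in this collection.

# Hypothetical usage of get_calfac().
from util import Time

calfac = get_calfac(Time('2019-07-04 20:00'))
print 'TP cal record time:', Time(calfac['timestamp'], format='lv').iso
print 'tpcalfac shape (nant, npol, nf):', calfac['tpcalfac'].shape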
Example 4
def sql2refcal(t, lohi=False):
    '''Supply a timestamp in Time format, return the closest refcal data'''
    import cal_header as ch
    import stateframe as stf
    if lohi:
        caltype = 12
    else:
        caltype = 8
    xml, buf = ch.read_cal(caltype, t=t)
    refcal = stf.extract(
        buf, xml['Refcal_Real']) + stf.extract(buf, xml['Refcal_Imag']) * 1j
    flag = stf.extract(buf, xml['Refcal_Flag'])
    fghz = stf.extract(buf, xml['Fghz'])
    sigma = stf.extract(buf, xml['Refcal_Sigma'])
    timestamp = Time(stf.extract(buf, xml['Timestamp']), format='lv')
    tbg = Time(stf.extract(buf, xml['T_beg']), format='lv')
    ted = Time(stf.extract(buf, xml['T_end']), format='lv')
    pha = np.angle(refcal)
    amp = np.absolute(refcal)
    return {
        'pha': pha,
        'amp': amp,
        'flag': flag,
        'fghz': fghz,
        'sigma': sigma,
        'timestamp': timestamp,
        't_bg': tbg,
        't_ed': ted
    }
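A usage sketch for sql2refcal(); the date is illustrative.

# Hypothetical usage of sql2refcal() for both receivers.
from util import Time

refcal = sql2refcal(Time('2019-07-04 20:00'))              # standard refcal (type 8)
lohi = sql2refcal(Time('2019-07-04 20:00'), lohi=True)     # low/high-frequency receiver refcal (type 12)
print 'Refcal measured from', refcal['t_bg'].iso, 'to', refcal['t_ed'].iso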
Example 5
def get_gain_corr(trange, tref=None, fghz=None):
    ''' Calls get_gain_state() for a timerange and a reference time,
        and returns the gain difference table to apply to data in the
        given timerange.  If no reference time is provided, the gain
        state is referred to the nearest earlier REFCAL.
        
        Returns a dictionary containing:
          antgain    Array of size (15, 2, 34, nt) = (nant, npol, nbands, nt)
          times      A Time() object corresponding to the times in 
                       antgain
    '''
    if tref is None:
        # No reference time specified, so get nearest earlier REFCAL
        xml, buf = ch.read_cal(8,t=trange[0])
        tref = Time(stf.extract(buf,xml['Timestamp']),format='lv')
    # Get the gain state at the reference time (actually median over 1 minute)
    trefrange = Time([tref.iso,Time(tref.lv+61,format='lv').iso])
    ref_gs =  get_gain_state(trefrange)  # refcal gain state for 60 s
    # Get median of refcal gain state (which should be constant anyway)
    ref_gs['h1'] = np.median(ref_gs['h1'],1)
    ref_gs['h2'] = np.median(ref_gs['h2'],1)
    ref_gs['v1'] = np.median(ref_gs['v1'],1)
    ref_gs['v2'] = np.median(ref_gs['v2'],1)

    # Get the gain state of the requested timerange
    src_gs = get_gain_state(trange)   # solar gain state for timerange of file
    nt = len(src_gs['times'])
    antgain = np.zeros((15,2,34,nt),np.float32)   # Antenna-based gains vs. band
    for i in range(15):
        for j in range(34):
            antgain[i,0,j] = src_gs['h1'][i] + src_gs['h2'][i] - ref_gs['h1'][i] - ref_gs['h2'][i] + src_gs['dcmattn'][i,0,j] - ref_gs['dcmattn'][i,0,j]
            antgain[i,1,j] = src_gs['v1'][i] + src_gs['v2'][i] - ref_gs['v1'][i] - ref_gs['v2'][i] + src_gs['dcmattn'][i,1,j] - ref_gs['dcmattn'][i,1,j]

    return {'antgain': antgain, 'times': src_gs['times']}
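A usage sketch for get_gain_corr(); antgain is a table of attenuation differences in dB, so converting it to a multiplicative power factor with 10**(antgain/10.) follows the way apply_attn_corr() later in this collection uses the same quantity.

# Hypothetical usage of get_gain_corr().
from util import Time
import numpy as np

trange = Time(['2019-07-04 18:00', '2019-07-04 20:00'])
corr = get_gain_corr(trange)
antgain = corr['antgain']          # (15, 2, 34, nt) attenuation differences in dB
gainfac = 10**(antgain / 10.)      # linear power factor, if a correction is to be applied
print 'Gain table covers', len(corr['times']), 'times'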
Example 6
def acc2sql():
    ''' This is just a test version to read the stateframe once a second from
        the ACC and send it to the SQL server.  A more complete version of this
        is implemented in schedule.py for "production" work.
    '''
    # Get stateframe structure and version
    accini = stateframe.rd_ACCfile()
    sf_version = accini['version']
    brange, outlist = sfdef(accini['sf'])
    with pyodbc.connect("DRIVER={FreeTDS};SERVER=192.168.24.106,1433; \
                             DATABASE=eOVSA06;UID=aaa;PWD=I@bsbn2w;") as cnxn:
        cursor = cnxn.cursor()
        lineno = 0
        while 1:
            lineno += 1
            data, msg = stateframe.get_stateframe(accini)
            version = stateframe.extract(data,['d',8])
            if version == sf_version:
                bufout = transmogrify(data, brange)
                try:
                    cursor.execute('insert into fBin (Bin) values (?)',pyodbc.Binary(bufout))
                    print 'Record '+str(lineno)+' successfully written\r',
                    cnxn.commit()
                except:
                    # An exception could be an error, or just that the entry was already inserted
                    pass
            else:
                print 'Error: Incompatible version in stateframe.'
                break
            time.sleep(1)
Example 7
def fseq_is_running(fseqfile, accini=None):
    ''' Check current stateframe to see if the given sequence file is
        running.  Returns True if so, False otherwise
    '''
    import stateframe as stf
    if accini is None:
        accini = stf.rd_ACCfile()
    # Make sure this sequence is actually running, or start it if not
    buf, msg = stf.get_stateframe(accini)
    if msg != 'No Error':
        print 'Error reading stateframe.'
        return None
    fseq = stf.extract(buf, accini['sf']['LODM']['LO1A']['FSeqFile'])
    fseq = fseq.strip('\x00')  # strip nulls from name
    result = fseq == fseqfile and stf.extract(
        buf, accini['sf']['LODM']['LO1A']['SweepStatus']) == 8
    return result
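A usage sketch for fseq_is_running(); the sequence file name is only illustrative (gainseq.fsq appears elsewhere in this collection).

# Hypothetical usage of fseq_is_running().
running = fseq_is_running('gainseq.fsq')
if running is None:
    print 'Could not read the stateframe.'
elif running:
    print 'gainseq.fsq is the active sequence and the sweep is running.'
else:
    print 'gainseq.fsq is not running.'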
Example 8
def rstntext2sql(startdt, enddt, logfile=None):
    """This routine extracts data from the old archive text file and
    writes it to SQL. It will output a list of dates that were not
    archived. The SQL time will be the date of the data at 0300.
    The program checks to see if there is current data already for
    the date range. It will NOT overwrite a record if it is already
    present.
    
    startdt and enddt are the dates that will be written to SQL from
    startdt up to but not including enddt."""

    data = rstnfluxfromtextarchive(startdt, enddt)
    if data is None:
        print "No RSTN data found in range ", startdt.iso, " to ", enddt.iso
        return

    print "Processing data from: ", startdt.iso, " to ", enddt.iso

    offset = int(np.floor(startdt.mjd))
    days = int(np.floor(enddt.mjd)) - offset

    processed = np.zeros((days), dtype=bool)
    recordswritten = 0
    existingrecords = 0
    for d in data:
        print "Processing Date: ", d[0].iso
        i = int(np.floor(d[0].mjd)) - offset
        processed[i] = True
        sqltime = Time(np.floor(d[0].mjd) + 0.125, format='mjd')
        xml, buf = ch.read_cal(12, sqltime)
        if buf is not None:
            sqltime_read = Time(extract(buf, xml['SQL_timestamp']),
                                format='lv')
            if np.floor(sqltime_read.mjd) != np.floor(sqltime.mjd): buf = None

        if buf is None:
            if ch.rstnflux2sql(d, sqltime):
                recordswritten += 1
                print "Record Written"
            else:
                print "Record Write Failed!"
        else:
            print "Record Exists."
            existingrecords += 1

    if logfile is None: logfile = "/tmp/missingrstn.txt"

    f = open(logfile, "w")
    for i in range(days):
        if not processed[i]:
            f.write(Time(float(i + offset) + 0.125, format='mjd').iso + "\n")
    f.close()

    print "Number of days searched:    ", days
    print "Number of existing records: ", existingrecords
    print "Records Written:            ", recordswritten
    print "Missing Records:            ", days - (recordswritten +
                                                  existingrecords)
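A usage sketch for rstntext2sql(); the date range and log file path are illustrative.

# Hypothetical usage of rstntext2sql() -- write one week of archived RSTN data to SQL.
from util import Time

rstntext2sql(Time('2015-01-01'), Time('2015-01-08'), logfile='/tmp/missingrstn.txt')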
Example 9
def DCM_cal(filename=None,fseqfile='gainseq.fsq',dcmattn=None,missing='ant15',update=False):

    if filename is None:
        return 'Must specify ADC packet capture filename, e.g. "/dppdata1/PRT/PRT<yyyymmddhhmmss>adc.dat"'

    userpass = '******'
    fseq_handle = urllib2.urlopen('ftp://'+userpass+'acc.solar.pvt/parm/'+fseqfile,timeout=0.5)
    lines = fseq_handle.readlines()
    fseq_handle.close()
    for line in lines:
        if line.find('LIST:SEQUENCE') != -1:
            line = line[14:]
            bandlist = np.array(map(int,line.split(',')))
    if len(np.unique(bandlist)) != 34:
        print 'Frequency sequence must contain all bands [1-34]'
        return None
    # Read packet capture file
    adc = p.rd_jspec(filename)
    pwr = np.rollaxis(adc['phdr'],2)[:,:,:2]
    # Put measured power into uniform array arranged by band
    new_pwr = np.zeros((34,16,2))
    for i in range(34):
        idx, = np.where(bandlist-1 == i)
        if len(idx) > 0:
            new_pwr[i] = np.median(pwr[idx],0)
    new_pwr.shape = (34,32)
    # Read table from the database.
    import cal_header
    import stateframe
    xml, buf = cal_header.read_cal(2)
    cur_table = stateframe.extract(buf,xml['Attenuation'])
    
    if dcmattn:
        # A DCM attenuation value was given, which presumes a constant value
        # so use it as the "original table."
        orig_table = np.zeros((34,30)) + dcmattn
        # orig_table[:,26:28] = 24
        orig_table[:,28:] = 0
    else:
        # No DCM attenuation value was given, so use current DCM master
        # table from the database.
        orig_table = cur_table
        
    attn = np.log10(new_pwr[:,:-2]/1600.)*10.
    # Zero any changes for missing antennas, and override orig_table with cur_table for those antennas
    if missing:
        idx = p.ant_str2list(missing)
        bad = np.sort(np.concatenate((idx*2,idx*2+1)))
        attn[:,bad] = 0
        orig_table[:,bad] = cur_table[:,bad]
    new_table = (np.clip(orig_table + attn,0,30)/2).astype(int)*2
    DCMlines = []
    DCMlines.append('#      Ant1  Ant2  Ant3  Ant4  Ant5  Ant6  Ant7  Ant8  Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15')
    DCMlines.append('#      X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y')
    DCMlines.append('#     ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----')
    for band in range(1,35):
        DCMlines.append('{:2} :  {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'.format(band,*new_table[band-1]))
    return DCMlines
Example 10
def read_dbcalfac(t):
    ''' Read the contents of a SOLPNT calibration file from the SQL database, and return
        fghz, calfac, offsun arrays
    '''
    import stateframe
    import cal_header as ch
    try:
        tp, buf = ch.read_cal(1, t)
    except:
        print 'READ_DBCALFAC: Error reading calibration factors'
        return None, None, None
    fghz = stateframe.extract(buf, tp['FGHz'])
    poln = stateframe.extract(buf, tp['Poln'])
    nf = len(fghz)
    npol = len(poln)
    nant = len(tp['Antenna'])
    calfac = np.zeros((npol, nf, nant), 'float')
    offsun = np.zeros((npol, nf, nant), 'float')
    for i, ant in enumerate(tp['Antenna']):
        calfac[:, :, i] = stateframe.extract(buf, ant['Calfac'])
        offsun[:, :, i] = stateframe.extract(buf, ant['Offsun'])
    idx = np.isfinite(calfac[0, :, 0])
    return fghz[idx], calfac[:, idx, :], offsun[:, idx, :]
Example 11
def sql2refcal(t):
    '''Supply a timestamp in Time format, return the closest refcal data'''
    import cal_header as ch
    import stateframe as stf
    xml, buf = ch.read_cal(8, t=t)
    refcal = stf.extract(buf, xml['Refcal_Real']) + stf.extract(buf, xml['Refcal_Imag']) * 1j
    flag = stf.extract(buf, xml['Refcal_Flag'])
    fghz = stf.extract(buf, xml['Fghz'])
    sigma = stf.extract(buf, xml['Refcal_Sigma'])
    timestamp = Time(stf.extract(buf, xml['Timestamp']), format='lv')
    tbg = Time(stf.extract(buf, xml['T_beg']), format='lv')
    ted = Time(stf.extract(buf, xml['T_end']), format='lv')
    pha = np.angle(refcal)
    amp = np.absolute(refcal)
    return {'pha': pha, 'amp': amp, 'flag': flag, 'fghz': fghz, 'sigma': sigma, 'timestamp': timestamp, 't_bg': tbg,
            't_ed': ted}
Example 12
def read_dbcalfac(t):
    ''' Read the contents of a SOLPNT calibration file from the SQL database, and return
        fghz, calfac, offsun arrays
    '''
    import stateframe
    import cal_header as ch
    try:
        tp, buf = ch.read_cal(1,t)
    except:
        print 'READ_DBCALFAC: Error reading calibration factors'
        return None, None, None
    fghz = stateframe.extract(buf,tp['FGHz'])
    poln = stateframe.extract(buf,tp['Poln'])
    nf = len(fghz)
    npol = len(poln)
    nant = len(tp['Antenna'])
    calfac = np.zeros((npol,nf,nant),'float')
    offsun = np.zeros((npol,nf,nant),'float')
    for i,ant in enumerate(tp['Antenna']):
        calfac[:,:,i] = stateframe.extract(buf,ant['Calfac'])
        offsun[:,:,i] = stateframe.extract(buf,ant['Offsun'])
    idx = np.isfinite(calfac[0,:,0])
    return fghz[idx], calfac[:,idx,:], offsun[:,idx,:]
Example 13
def get_val_from_stflog(filename,loc):
    arr = False
    if len(loc) == 3:
        print 'Cannot plot multiple values--will plot only index 0'
        arr = True
    with open(filename,'rb') as f:
        data = f.read(100)
        recsize = struct.unpack_from('i',data,16)[0]
        tstamp = stateframe.extract(data,['d',0])
        t = Time(tstamp,format='lv')
        f.seek(0)
        data = f.read(recsize)
        i = 0
        nrec = os.stat(filename).st_size/recsize
        tm = np.zeros(nrec,'float')
        val = np.zeros(nrec,'float')
        while len(data) == recsize:
            # Read timestamps
            tm[i] = stateframe.extract(data,['d',0])
            if arr:
                val[i] = stateframe.extract(data,loc)[0]
            else:
                val[i] = stateframe.extract(data,loc)
            data = f.read(recsize)
            i += 1
    # Convert timestamps to Time() array
    times = Time(tm,format='lv')
    bad = np.where(val == 0.0)[0]
    val[bad] = np.NaN
    tm[bad] = np.NaN
    fig = plt.figure()
    sub_plot = fig.add_subplot(111)
    sub_plot.plot_date(times.plot_date,val,'+')
    fmt = plt.DateFormatter('%H:%M')
    sub_plot.xaxis.set_major_formatter(fmt)
    plt.title(t.iso[0:10])
    return times,val
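A usage sketch for get_val_from_stflog(); the log file name and the stateframe location (format string plus byte offset) are purely illustrative assumptions.

# Hypothetical usage of get_val_from_stflog() -- file name and location are made up.
times, val = get_val_from_stflog('/tmp/stateframe.log', ['d', 32])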
Example 14
def chk_lo1a(accini, band, iteration=1):
    data, msg = stf.get_stateframe(accini)
    errstr = stf.extract(data,accini['sf']['LODM']['LO1A']['ERR']).split('"')[1]
    if iteration == 1 and errstr != 'No error':
        # Looks like a reboot of LO1A is needed!
        print '10-s delay while attempting to reboot LO1A'
        acc = {'host': accini['host'], 'scdport':accini['scdport']}
        send_cmds(['LO1A-REBOOT'],acc)
        time.sleep(10)
        acc_tune(band+1,acc)
        time.sleep(5)
        errstr = chk_lo1a(accini,band,iteration=2)
        if errstr != 'No error':
            errstr = 'Reboot attempt failed.'
    return errstr
Example 15
def read_attncal(trange=None):
    ''' Given a timerange as a Time() object, read FEM attenuation
        records for each date from the SQL database, and return them
        as a list of attn dictionaries.  To get values for only a single
        day, the trange Time() object can have the same time repeated, or can
        be a single time.
        
        Returns a list of dictionaries, each pertaining to one of the days
        in trange, with keys defined as follows:
           'time':      The start time of the GAINCALTEST scan, as a Time() object
           'fghz':      The list of frequencies [GHz] at which attenuations are measured
           'attn':      The array of attenuations [dB] of size (nattn, nant, npol, nf), 
                           where nattn = 8, nant = 13, npol = 2, and nf is variable

    '''
    import cal_header as ch
    import stateframe as stf
    if trange is None:
        trange = Time.now()
    if type(trange.mjd) == np.float:
        # Interpret single time as both start and end time
        mjd1 = int(trange.mjd)
        mjd2 = mjd1
    else:
        mjd1, mjd2 = trange.mjd.astype(int)
    attn = []
    for mjd in range(mjd1, mjd2 + 1):
        # Read next earlier SQL entry from end of given UT day (mjd+0.999)
        xml, buf = ch.read_cal(7, t=Time(mjd + 0.999, format='mjd'))
        t = Time(stf.extract(buf, xml['Timestamp']), format='lv')
        fghz = stf.extract(buf, xml['FGHz'])
        nf = len(np.where(fghz != 0.0)[0])
        fghz = fghz[:nf]
        attnvals = stf.extract(buf, xml['FEM_Attn_Real'])[:, :, :, :nf]
        attn.append({'time': t, 'fghz': fghz, 'attn': attnvals})
    return attn
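A usage sketch for read_attncal() over a two-day range; the dates are illustrative.

# Hypothetical usage of read_attncal().
from util import Time

attn = read_attncal(Time(['2019-07-04', '2019-07-05']))
for rec in attn:
    print rec['time'].iso, 'attn shape (nattn, nant, npol, nf):', rec['attn'].shape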
Example 16
def set_fem_attn(level=3, ant_str='ant1-15'):
    ''' Read current power and attenuation values in FEMs, and set the attenuation
        such that the power level will be as close to "level" as possible.
    '''
    accini = stf.rd_ACCfile()
    acc = {'host': accini['host'], 'scdport': accini['scdport']}

    ant_list = ant_str2list(ant_str)
    if ant_list is None:
        return 'Bad antenna list'
    accini = stf.rd_ACCfile()
    hatn1 = np.zeros((10, 15), dtype='int')
    vatn1 = np.zeros((10, 15), dtype='int')
    hatn2 = np.zeros((10, 15), dtype='int')
    vatn2 = np.zeros((10, 15), dtype='int')
    hpwr = np.zeros((10, 15), dtype='float')
    vpwr = np.zeros((10, 15), dtype='float')
    # Read 10 instances of attenuation and power, and take the median to avoid
    # glitches
    for i in range(10):
        # Read the frontend attenuations and powers for each antenna
        data, msg = stf.get_stateframe(accini)
        for iant in range(15):
            fem = accini['sf']['Antenna'][iant]['Frontend']['FEM']
            hatn1[i, iant] = stf.extract(data,
                                         fem['HPol']['Attenuation']['First'])
            vatn1[i, iant] = stf.extract(data,
                                         fem['VPol']['Attenuation']['First'])
            hatn2[i, iant] = stf.extract(data,
                                         fem['HPol']['Attenuation']['Second'])
            vatn2[i, iant] = stf.extract(data,
                                         fem['VPol']['Attenuation']['Second'])
            hpwr[i, iant] = stf.extract(data, fem['HPol']['Power'])
            vpwr[i, iant] = stf.extract(data, fem['VPol']['Power'])
        time.sleep(1)
    hatn1 = np.median(hatn1, 0).astype('int')
    vatn1 = np.median(vatn1, 0).astype('int')
    hatn2 = np.median(hatn2, 0).astype('int')
    vatn2 = np.median(vatn2, 0).astype('int')
    hpwr = np.median(hpwr, 0)
    vpwr = np.median(vpwr, 0)
    hatn2 = np.clip(hatn2 - (level - hpwr).astype('int'), 0, 31)
    vatn2 = np.clip(vatn2 - (level - vpwr).astype('int'), 0, 31)
    # Send attenuation commands to each antenna in ant_list
    for iant in ant_list:
        hatn = str(hatn1[iant]) + ' ' + str(
            hatn2[iant]) + ' ant' + str(iant + 1)
        vatn = str(vatn1[iant]) + ' ' + str(
            vatn2[iant]) + ' ant' + str(iant + 1)
        send_cmds(['HATTN ' + hatn, 'VATTN ' + vatn], acc)
    return 'Success'
Example 17
def compare_tbl(tbl, t=None):
    ''' Compare the given table with that from the SQL database.
    
        Inputs:
           tbl       A text string version of a table as returned by DCM_calnew()
           t         An optional Time() object giving a date/time of the table to compare to.
                       If omitted or None, the currently active table is used.  Note, the
                       exact date is not required.  Any date after the desired SQL record
                       will return the active table for that date.
    '''
    import cal_header
    import stateframe
    from copy import deepcopy
    xml, buf = cal_header.read_cal(2, t=t)
    cur_table = stateframe.extract(buf,xml['Attenuation'])
    diftbl = deepcopy(tbl)   # Make a copy to update, to preserve structure
    for i in range(52):
        dif = map(int,tbl[i+3][4:].split()) - cur_table[i]
        diftbl[3+i] = '{:2} : {:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}{:3}'.format(i,*dif)
    return diftbl
Example 18
def bandlist2dcmtable(bandlist, toACC=False):
    '''Use list of bands representing a frequency sequence, to set 
       dcmtable.txt from the DCM_master_table, and return the lines
       of the table.  Optionally, the table is sent to ACC.
       
       Input:
          bandlist   numpy 50-element integer array of band numbers, 1-34
          toACC      optional boolean.  If True, sends results to ACC and 
                       the SQL database.  Default is False (does not send)
    '''
    import stateframe as stf
    import cal_header as ch
    from ftplib import FTP
    # Convert from 1-based bandlist to zero-based band numbers
    bands = bandlist - 1
    # Read master table from SQL server
    dcm, buf = ch.read_cal(2)
    dcm_m_attn = stf.extract(buf, dcm['Attenuation'])
    dcm_attn = dcm_m_attn[bands]
    lines = []
    g = open('/tmp/DCM_table.txt', 'w')
    for line in dcm_attn:
        l = ' '.join(map(str, line))
        lines.append(l)
        g.write(l + '\n')
    g.close()
    if toACC:
        ch.dcm_table2sql(lines)
        # Connect to ACC /parm directory and transfer scan_header files
        try:
            g = open('/tmp/DCM_table.txt', 'r')
            acc = FTP('acc.solar.pvt')
            acc.login('admin', 'observer')
            acc.cwd('parm')
            # Send DCM table lines to ACC
            print acc.storlines('STOR dcm.txt', g)
            g.close()
            print 'Successfully wrote dcm.txt to ACC'
        except:
            print 'Cannot FTP dcm.txt to ACC'
    return lines
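A usage sketch for bandlist2dcmtable(); the 50-element band sequence is made up for illustration.

# Hypothetical usage of bandlist2dcmtable() with a fabricated 50-slot sequence.
import numpy as np

bandlist = np.array(range(1, 35) + range(1, 17))   # 50 slots drawn from bands 1-34
lines = bandlist2dcmtable(bandlist, toACC=False)   # build the table only; do not send to ACC
print '\n'.join(lines[:3])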
Example 19
def override(tbl, bandlist, t=None):
    ''' When a table has bad attenuation for a given band, this command will replace
        the new values with the ones in the current DCM_master_table.
        
        Inputs:
           tbl       A text string version of a table as returned by DCM_calnew()
           bandlist  A simple list of band numbers (1 - 52) that will be replaced.
           t         An optional Time() object giving a date/time of the table to override from.
                       If omitted or None, the currently active table is used.  Note, the
                       exact date is not required.  Any date after the desired SQL record
                       will return the active table for that date.
    '''
    import cal_header
    import stateframe
    xml, buf = cal_header.read_cal(2, t=t)
    cur_table = stateframe.extract(buf,xml['Attenuation'])
    if type(bandlist) != list:
        # If bandlist is not a list, it may be just a single band, so make it a list (of one).
        bandlist = [bandlist]
    for band in bandlist:
        # Format cur_table[band-1] as text line, and use it to replace tbl[band+2] line
        tbl[band+2] = tbl[band+2][:5]+('{:3d}'*30).format(*cur_table[band-1])
    return tbl
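A usage sketch for override(), assuming tbl already holds a text table as returned by DCM_calnew() (or DCM_cal() above).

# Hypothetical usage of override() -- replace bands 5 and 6 of an existing table 'tbl'
# with the values from the currently active DCM master table.
tbl = override(tbl, [5, 6])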
Example 20
def set_fem_attn(level=3,ant_str='ant1-15'):
    ''' Read current power and attenuation values in FEMs, and set the attenuation
        such that the power level will be as close to "level" as possible.
    '''
    accini = stf.rd_ACCfile()
    acc = {'host': accini['host'], 'scdport':accini['scdport']}
    
    ant_list = ant_str2list(ant_str)
    if ant_list is None:
        return 'Bad antenna list'
    accini = stf.rd_ACCfile()
    hatn1 = np.zeros((10,15),dtype='int')
    vatn1 = np.zeros((10,15),dtype='int')
    hatn2 = np.zeros((10,15),dtype='int')
    vatn2 = np.zeros((10,15),dtype='int')
    hpwr = np.zeros((10,15),dtype='float')
    vpwr = np.zeros((10,15),dtype='float')
    # Read 10 instances of attenuation and power, and take the median to avoid
    # glitches
    for i in range(10):
        # Read the frontend attenuations and powers for each antenna
        data, msg = stf.get_stateframe(accini)
        for iant in range(15):
            fem = accini['sf']['Antenna'][iant]['Frontend']['FEM']
            hatn1[i,iant] = stf.extract(data,fem['HPol']['Attenuation']['First'])
            vatn1[i,iant] = stf.extract(data,fem['VPol']['Attenuation']['First'])
            hatn2[i,iant] = stf.extract(data,fem['HPol']['Attenuation']['Second'])
            vatn2[i,iant] = stf.extract(data,fem['VPol']['Attenuation']['Second'])
            hpwr[i,iant] = stf.extract(data,fem['HPol']['Power'])
            vpwr[i,iant] = stf.extract(data,fem['VPol']['Power'])
        time.sleep(1)
    hatn1 = np.median(hatn1,0).astype('int')
    vatn1 = np.median(vatn1,0).astype('int')
    hatn2 = np.median(hatn2,0).astype('int')
    vatn2 = np.median(vatn2,0).astype('int')
    hpwr = np.median(hpwr,0)
    vpwr = np.median(vpwr,0)
    hatn2 = np.clip(hatn2 - (level - hpwr).astype('int'),0,31)
    vatn2 = np.clip(vatn2 - (level - vpwr).astype('int'),0,31)
    # Send attenuation commands to each antenna in ant_list
    for iant in ant_list:
        hatn = str(hatn1[iant])+' '+str(hatn2[iant])+' ant'+str(iant+1)
        vatn = str(vatn1[iant])+' '+str(vatn2[iant])+' ant'+str(iant+1)
        send_cmds(['HATTN '+hatn,'VATTN '+vatn],acc)
    return 'Success'
Example 21
def fem_attn_update(fem_attn, t=None, rdfromsql=True):
    '''Given a record of the frontend attenuation levels from the stateframe, recalculate the corrected attenuation levels.
       fem_attn: recorded attn levels over a 10-min duration
       fem_attn_bitv: complex corrections to be applied to the data. Read from the SQL database or provided as a (16, 2, 2, 5) array'''
    import cal_header as ch
    import stateframe as stf
    if rdfromsql:
        xml, buf = ch.read_cal(7,t)
        fem_attn_bitv=np.nan_to_num(stf.extract(buf, xml['FEM_Attn_Real'])) + np.nan_to_num(stf.extract(buf, xml['FEM_Attn_Imag'])) * 1j
     
    h1=fem_attn['h1']
    h2=fem_attn['h2']
    v1=fem_attn['v1']
    v2=fem_attn['v2']
    attn=np.concatenate((np.concatenate((h1[...,None],v1[...,None]),axis=2)[...,None],
                         np.concatenate((h2[...,None],v2[...,None]),axis=2)[...,None]),axis=3)
    # Start with 0 attenuation as reference
    fem_attn_out=attn*0
    # Calculate resulting attenuation based on bit attn values (1,2,4,8,16)
    for i in range(5):
        fem_attn_out = fem_attn_out + (np.bitwise_and(attn,2**i)>>i)*fem_attn_bitv[...,i]
    #fem_gain=10.**(-fem_gain_db/10.)
    return fem_attn_out
Example 22
def find_calibrations(year, month):
    import calendar
    from util import Time
    import cal_header as ch
    import stateframe
    hc = calendar.HTMLCalendar(calendar.SUNDAY)
    html_table = hc.formatmonth(year, month)
    lines = html_table.split('\n')
    lines[2] = lines[2].replace('th class', 'th width="100" class')
    c = calendar.TextCalendar(calendar.SUNDAY)
    for i in c.itermonthdays(year, month):
        if i != 0:
            t = Time(str(year) + '-' + str(month) + '-' + str(i) + ' 20:00')
            cals = []
            for caltype in [8, 9, 10]:
                xml, buf = ch.read_cal(caltype, t)
                if buf is None:
                    cals.append(0)
                else:
                    tout = Time(stateframe.extract(buf, xml['Timestamp']),
                                format='lv')
                    if (t - tout).value < 1. / 3:
                        cals.append(1)
                    else:
                        cals.append(0)
            for k, line in enumerate(lines[3:]):
                idx = line.find(str(i))
                ns = len(str(i))
                if idx != -1:
                    line = line[:idx + ns] + '<br>-r- -p- -tp- <br>&nbsp;{} &nbsp; {} &nbsp; {}'.format(*cals) + line[idx + ns:]
                    break
            lines[k + 3] = line
    #print ''.join(line+'\n' for line in lines)
    return ''.join(line + '\n' for line in lines)
Example 23
def get_calfac(t=None):
    ''' Read total power and auto-correlation calibration factors from the SQL
        database, for the time specified by Time() object t, or if None, at the
        next earlier calibration time to the current time.
    '''
    tpcal_type = 10  # Calibration type specified in cal_header.py
    if t is None:
        t = Time.now()
    xml, buf = ch.read_cal(tpcal_type,t=t)
    fghz = stateframe.extract(buf,xml['FGHz'])
    nf = len(fghz)
    tpcalfac = np.zeros((13,2,nf),np.float)
    tpoffsun = np.zeros((13,2,nf),np.float)
    accalfac = np.zeros((13,2,nf),np.float)
    acoffsun = np.zeros((13,2,nf),np.float)
    nant = len(xml['Antenna'])
    for i in range(nant):
        iant = stateframe.extract(buf,xml['Antenna'][i]['Antnum'])-1
        tpcalfac[iant] = stateframe.extract(buf,xml['Antenna'][i]['TPCalfac'])
        accalfac[iant] = stateframe.extract(buf,xml['Antenna'][i]['ACCalfac'])
        tpoffsun[iant] = stateframe.extract(buf,xml['Antenna'][i]['TPOffsun'])
        acoffsun[iant] = stateframe.extract(buf,xml['Antenna'][i]['ACOffsun'])
    return {'fghz':fghz,'timestamp':stateframe.extract(buf,xml['Timestamp']),
            'tpcalfac':tpcalfac,'accalfac':accalfac,'tpoffsun':tpoffsun,'acoffsun':acoffsun}
Example 24
def get_fem_level(trange, dt=None):
    ''' Get FEM attenuation levels for a given timerange.  Returns a dictionary
        with keys as follows:

        times:     A Time object containing the array of times, size (nt)
        hlev:      The FEM attenuation level for HPol, size (nt, 15) 
        vlev:      The FEM attenuation level for VPol, size (nt, 15)
        dcmattn:   The base DCM attenuations for 34 bands x 15 antennas x 2 Poln, size (34,30)
                      The order is Ant1 H, Ant1 V, Ant2 H, Ant2 V, etc.
        dcmoff:    If DPPoffset-on is 0, this is None (meaning there are no changes to the
                      above base attenuations).  
                   If DPPoffset-on is 1, then dcmoff is a table of offsets to the 
                      base attenuation, size (nt, 50).  The offset applies to all 
                      antennas/polarizations.
                      
        Optional keywords:
           dt      Seconds between entries to read from SQL stateframe database. 
                     If omitted, 1 s is assumed.
        
    '''
    if dt is None:
        tstart, tend = [str(i) for i in trange.lv]
    else:
        # Expand time by 1/2 of dt before and after
        tstart = str(np.round(trange[0].lv - dt / 2))
        tend = str(np.round(trange[1].lv + dt / 2))
    cursor = db.get_cursor()
    ver = db.find_table_version(cursor, trange[0].lv)
    # Get front end attenuator states
    query = 'select Timestamp,Ante_Fron_FEM_Clockms,' \
            +'Ante_Fron_FEM_HPol_Regi_Level,Ante_Fron_FEM_VPol_Regi_Level from fV' \
            +ver+'_vD15 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        if dt:
            # If we want other than full cadence, get new array shapes and times
            n = len(data['Timestamp'])  # Original number of times
            new_n = (n / 15 / dt) * 15 * dt  # Truncated number of times equally divisible by dt
            new_shape = (n / 15 / dt, dt, 15)  # New shape of truncated arrays
            times = Time(data['Timestamp'][:new_n].astype('int')[::15 * dt],
                         format='lv')
        else:
            times = Time(data['Timestamp'].astype('int')[::15], format='lv')
        hlev = data['Ante_Fron_FEM_HPol_Regi_Level']
        vlev = data['Ante_Fron_FEM_VPol_Regi_Level']
        ms = data['Ante_Fron_FEM_Clockms']
        nt = len(hlev) / 15
        hlev.shape = (nt, 15)
        vlev.shape = (nt, 15)
        ms.shape = (nt, 15)
        # Find any entries for which Clockms is zero, which indicates where no
        # gain-state measurement is available.
        for i in range(15):
            bad, = np.where(ms[:, i] == 0)
            if bad.size != 0 and bad.size != nt:
                # Find nearest adjacent good value
                good, = np.where(ms[:, i] != 0)
                idx = nearest_val_idx(bad, good)
                hlev[bad, i] = hlev[good[idx], i]
                vlev[bad, i] = vlev[good[idx], i]
        if dt:
            # If we want other than full cadence, find mean over dt measurements
            hlev = np.mean(hlev[:new_n / 15].reshape(new_shape), 1)
            vlev = np.mean(vlev[:new_n / 15].reshape(new_shape), 1)
        # Put results in canonical order [nant, nt]
        hlev = hlev.T
        vlev = vlev.T
    else:
        print 'Error reading FEM levels:', msg
        return {}
    # Get back end attenuator states
    xml, buf = ch.read_cal(2, t=trange[0])
    dcmattn = stf.extract(buf, xml['Attenuation'])
    dcmattn.shape = (34, 15, 2)
    # Put into canonical order [nant, npol, nband]
    dcmattn = np.moveaxis(dcmattn, 0, 2)
    # See if DPP offset is enabled
    query = 'select Timestamp,DPPoffsetattn_on from fV' \
            +ver+'_vD1 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        dppon = data['DPPoffsetattn_on']
        if np.where(dppon > 0)[0].size == 0:
            dcm_off = None
        else:
            query = 'select Timestamp,DCMoffset_attn from fV' \
                    +ver+'_vD50 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
            data, msg = db.do_query(cursor, query)
            if msg == 'Success':
                otimes = Time(data['Timestamp'].astype('int')[::15],
                              format='lv')
                dcmoff = data['DCMoffset_attn']
                dcmoff.shape = (nt, 50)
                # We now have a time-history of offsets, at least some of which are non-zero.
                # Offsets by slot number do us no good, so we need to translate to band number.
                # Get fseqfile name at mean of timerange, from stateframe SQL database
                fseqfile = get_fseqfile(
                    Time(int(np.mean(trange.lv)), format='lv'))
                if fseqfile is None:
                    print 'Error: No active fseq file.'
                    dcm_off = None
                else:
                    # Get fseqfile from ACC and return bandlist
                    bandlist = fseqfile2bandlist(fseqfile)
                    # Use bandlist to covert nt x 50 array to nt x 34 band array of DCM attn offsets
                    # Note that this assumes DCM offset is the same for any multiply-sampled bands
                    # in the sequence.
                    dcm_off = np.zeros((nt, 34), float)
                    dcm_off[:, bandlist - 1] = dcmoff
                    # Put into canonical order [nband, nt]
                    dcm_off = dcm_off.T
                    if dt:
                        # If we want other than full cadence, find mean over dt measurements
                        new_nt = len(times)
                        dcm_off = dcm_off[:, :new_nt * dt]
                        dcm_off.shape = (34, dt, new_nt)
                        dcm_off = np.mean(dcm_off, 1)
            else:
                print 'Error reading DCM attenuations:', msg
                dcm_off = None
    else:
        print 'Error reading DPPon state:', msg
        dcm_off = None
    cursor.close()
    return {
        'times': times,
        'hlev': hlev.astype(int),
        'vlev': vlev.astype(int),
        'dcmattn': dcmattn,
        'dcmoff': dcm_off
    }
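A usage sketch for get_fem_level() at a 10-s cadence; the timerange is illustrative.

# Hypothetical usage of get_fem_level().
from util import Time

trange = Time(['2019-07-04 18:00', '2019-07-04 20:00'])
lev = get_fem_level(trange, dt=10)
if lev:
    print 'hlev shape (nant, nt):', lev['hlev'].shape
    print 'dcmattn shape (nant, npol, nband):', lev['dcmattn'].shape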
Example 25
def old_version_test(sflog=None,sfxml=None,outbinfile=None,outtabfile=None):
    ''' Read stateframe log files of older versions and
        create output file of rearranged binary data, and
        corresponding stateframedef table as text file.
           sflog = file name of stateframe log to read
           sfxml = file name of corresponding XML file
           outbinfile = file name of output binary data file
           outtabfile = file name of output table text file
    '''
    if sfxml:
        sf, version = rxml.xml_ptrs(sfxml)
    else:
        sf = None
        version = 0.0

    if sflog:
        try:
            f = open(sflog,'rb')
        except:
            print 'Could not open file',sflog,'-- Exiting.'
            return
    
        # Get binary size and check version number
        data = f.read(100)
        if stateframe.extract(data,['d',8]) != version:
            print 'Stateframe log file version does not match XML version. -- Exiting'
            return
        recsize = stateframe.extract(data,sf['Binsize'])
        f.close()
    else:
        # No log file specified, so we will try to read directly from ACC once per second
        # Read one as a test and get its version number
        # Read from ACC
        accini = stateframe.rd_ACCfile()
        data, msg = stateframe.get_stateframe(accini)
        version = stateframe.extract(data,['d',8])
        

    # Parse the stateframe dictionary and generate the brange and outlist dicts
    brange, outlist = sfdef(sf)
    # Calculate the startbytes in the list -- modifies outlist in place
    startbyte(outlist)

    stdout = sys.stdout  # Save current stdout
    if outtabfile:
        # Write the table info to the given file name -- just sets stdout to the file,
        # writes it, and resets stdout
        try:
            sys.stdout = open(outtabfile,'w')
        except:
            print 'Could not redirect STDOUT to',outtabfile,' -- Will print to screen'
            sys.stdout = stdout

    outlist2table(outlist,version)
    sys.stdout = stdout   # Reset to standard stdout

    if outbinfile:
        try:
            g = open(outbinfile,'wb')
        except:
            print 'Could not open file',outbinfile,'for writing. -- Exiting.'
            return
        if sflog:
            # Read from log file
            f = open(sflog,'rb')
            while 1:
                # Read and rearrange 1000 records
                try:
                    indata = f.read(recsize)
                    outdata = transmogrify(indata,brange)
                    g.write(outdata)
                except:
                    f.close()
                    g.close()
                    return
        else:
            # Read from ACC
            accini = stateframe.rd_ACCfile()
            for i in range(60):
                # Read from ACC and rearrange 60 records -- takes 1 min
                indata, msg = stateframe.get_stateframe(accini)
                outdata = transmogrify(indata,brange)
                g.write(outdata)
                time.sleep(1)
            g.close()

    return            
Example 26
def sql2phacalX(trange, *args, **kwargs):
    '''Supply a timestamp in Time format, return the closest phacal data.
        If a time range is provided, return records within the time range.'''
    import cal_header as ch
    import stateframe as stf
    xml, bufs = ch.read_calX(9, t=trange, *args, **kwargs)
    if isinstance(bufs, list):
        phacals = []
        for i, buf in enumerate(bufs):
            try:
                phacal_flag = stf.extract(buf, xml['Phacal_Flag'])
                fghz = stf.extract(buf, xml['Fghz'])
                sigma = stf.extract(buf, xml['Phacal_Sigma'])
                timestamp = Time(stf.extract(buf, xml['Timestamp']),
                                 format='lv')
                tbg = Time(stf.extract(buf, xml['T_beg']), format='lv')
                ted = Time(stf.extract(buf, xml['T_end']), format='lv')
                pha = stf.extract(buf, xml['Phacal_Pha'])
                amp = stf.extract(buf, xml['Phacal_Amp'])
                tmp = stf.extract(buf, xml['MBD'])
                poff, pslope = tmp[:, :, 0], tmp[:, :, 1]
                flag = stf.extract(buf, xml['Flag'])[:, :, 0]
                t_ref = Time(stf.extract(buf, xml['T_refcal']), format='lv')
                phacals.append({
                    'pslope': pslope,
                    't_pha': timestamp,
                    'flag': flag,
                    'poff': poff,
                    't_ref': t_ref,
                    'phacal': {
                        'pha': pha,
                        'amp': amp,
                        'flag': phacal_flag,
                        'fghz': fghz,
                        'sigma': sigma,
                        'timestamp': timestamp,
                        't_bg': tbg,
                        't_ed': ted
                    }
                })
            except:
                print 'failed to load record {} ---> {}'.format(
                    i + 1,
                    Time(stf.extract(buf, xml['Timestamp']), format='lv').iso)
        return phacals
    elif isinstance(bufs, str):
        phacal_flag = stf.extract(bufs, xml['Phacal_Flag'])
        fghz = stf.extract(bufs, xml['Fghz'])
        sigma = stf.extract(bufs, xml['Phacal_Sigma'])
        timestamp = Time(stf.extract(bufs, xml['Timestamp']), format='lv')
        tbg = Time(stf.extract(bufs, xml['T_beg']), format='lv')
        ted = Time(stf.extract(bufs, xml['T_end']), format='lv')
        pha = stf.extract(bufs, xml['Phacal_Pha'])
        amp = stf.extract(bufs, xml['Phacal_Amp'])
        tmp = stf.extract(bufs, xml['MBD'])
        poff, pslope = tmp[:, :, 0], tmp[:, :, 1]
        flag = stf.extract(bufs, xml['Flag'])[:, :, 0]
        t_ref = Time(stf.extract(bufs, xml['T_refcal']), format='lv')
        return {
            'pslope': pslope,
            't_pha': timestamp,
            'flag': flag,
            'poff': poff,
            't_ref': t_ref,
            'phacal': {
                'pha': pha,
                'amp': amp,
                'flag': phacal_flag,
                'fghz': fghz,
                'sigma': sigma,
                'timestamp': timestamp,
                't_bg': tbg,
                't_ed': ted
            }
        }
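A usage sketch for sql2phacalX() over a time range, which returns a list of records; the dates are illustrative.

# Hypothetical usage of sql2phacalX().
from util import Time

trange = Time(['2019-07-04 00:00', '2019-07-05 00:00'])
phacals = sql2phacalX(trange)
for p in phacals:
    print p['t_pha'].iso, 'refcal used:', p['t_ref'].iso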
Example 27
def apply_attn_corr(data, tref=None):
    ''' Applies the attenuator state corrections to the given data dictionary,
        corrected to the gain-state at time given by Time() object tref.
        
        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          tref     A Time() object with the reference time, or if None,
                     the gain state of the nearest earlier REFCAL is 
                     used.
        Output:
          cdata    A dictionary with the gain-corrected data.  The keys
                     px, py, and x, are updated.
                     
        NB: This is the same routine as in gaincal2.py, but modified
        to handle the different ordering/format of data from udb_util.py's
        readXdata() routine.
    '''
    from gaincal2 import get_gain_state
    from util import common_val_idx, nearest_val_idx
    import copy
    if tref is None:
        # No reference time specified, so get nearest earlier REFCAL
        trange = Time(data['time'][[0,-1]],format='jd')
        xml, buf = ch.read_cal(8,t=trange[0])
        tref = Time(stateframe.extract(buf,xml['Timestamp']),format='lv')
    # Get the gain state at the reference time (actually median over 1 minute)
    trefrange = Time([tref.iso,Time(tref.lv+60,format='lv').iso])
    ref_gs =  get_gain_state(trefrange)  # refcal gain state for 60 s
    # Get median of refcal gain state (which should be constant anyway)
    ref_gs['h1'] = np.median(ref_gs['h1'],1)
    ref_gs['h2'] = np.median(ref_gs['h2'],1)
    ref_gs['v1'] = np.median(ref_gs['v1'],1)
    ref_gs['v2'] = np.median(ref_gs['v2'],1)

    # Get timerange from data
    trange = Time([data['time'][0],data['time'][-1]],format='jd')
    # Get time cadence
    dt = np.int(np.round(np.median(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1: dt = None
    # Get the gain state of the requested timerange
    src_gs = get_gain_state(trange,dt)   # solar gain state for timerange of file
    nt = len(src_gs['times'])
    antgain = np.zeros((15,2,34,nt),np.float32)   # Antenna-based gains vs. band
    for i in range(15):
        for j in range(34):
            antgain[i,0,j] = src_gs['h1'][i] + src_gs['h2'][i] - ref_gs['h1'][i] - ref_gs['h2'][i] + src_gs['dcmattn'][i,0,j] - ref_gs['dcmattn'][i,0,j]
            antgain[i,1,j] = src_gs['v1'][i] + src_gs['v2'][i] - ref_gs['v1'][i] - ref_gs['v2'][i] + src_gs['dcmattn'][i,1,j] - ref_gs['dcmattn'][i,1,j]

    cdata = copy.deepcopy(data)
    # Create giant array of baseline-based gains, translated to baselines and frequencies
    fghz = data['fghz']
    nf = len(fghz)
    blist = (fghz*2 - 1).astype(int) - 1       # Band list corresponding to frequencies in data
    blgain = np.zeros((nf,136,4,nt),float)     # Baseline-based gains vs. frequency
    for k,bl in enumerate(get_bl_order()):
        i, j = bl
        if i < 15 and j < 15:
            blgain[:,k,0] = 10**((antgain[i,0,blist] + antgain[j,0,blist])/20.)
            blgain[:,k,1] = 10**((antgain[i,1,blist] + antgain[j,1,blist])/20.)
            blgain[:,k,2] = 10**((antgain[i,0,blist] + antgain[j,1,blist])/20.)
            blgain[:,k,3] = 10**((antgain[i,1,blist] + antgain[j,0,blist])/20.)
    # Reorder antgain axes to put frequencies in first slot, to match data
    antgain = np.swapaxes(np.swapaxes(antgain,1,2),0,1)
    antgainf = 10**(antgain[blist]/10.)

    idx = nearest_val_idx(data['time'],src_gs['times'].jd)
    # Correct the auto- and cross-correlation data
    cdata['x'] *= blgain[:,:,:,idx]
    # Reshape px and py arrays
    cdata['px'].shape = (134,16,3,nt)
    cdata['py'].shape = (134,16,3,nt)
    # Correct the power
    cdata['px'][:,:15,0] *= antgainf[:,:,0,idx]
    cdata['py'][:,:15,0] *= antgainf[:,:,1,idx]
    # Correct the power-squared
    cdata['px'][:,:15,1] *= antgainf[:,:,0,idx]**2
    cdata['py'][:,:15,1] *= antgainf[:,:,1,idx]**2
    # Reshape px and py arrays back to original
    cdata['px'].shape = (134*16*3,nt)
    cdata['py'].shape = (134*16*3,nt)
    return cdata
Example 28
def unrot(data, azeldict=None):
    ''' Apply the correction to differential feed rotation to data, and return
        the corrected data.

        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          azeldict The dictionary returned from get_sql_info(), or if None, the appropriate
                     get_sql_info() call is done internally.

        Output:
          cdata    A dictionary with the phase-corrected data.  Only the key
                     x is updated.
    '''
    import copy
    from util import lobe
    trange = Time(data['time'][[0,-1]],format='jd')

    if azeldict is None:
        azeldict = get_sql_info(trange)
    chi = azeldict['ParallacticAngle']  # (nt, nant)
    # Correct parallactic angle for equatorial mounts, relative to Ant14
    for i in [8,9,10,12,13]:
        chi[:,i] -= chi[:,13]
        
    # Ensure that nearest valid parallactic angle is used for times in the data
    good, = np.where(azeldict['ActualAzimuth'][0] != 0)
    tidx = nearest_val_idx(data['time'],azeldict['Time'][good].jd)

    # Read X-Y Delay phase from SQL database and get common frequencies
    xml, buf = ch.read_cal(11,t=trange[0])
    fghz = stateframe.extract(buf,xml['FGHz'])
    good, = np.where(fghz != 0.)
    fghz = fghz[good]
    dph = stateframe.extract(buf,xml['XYphase'])
    dph = dph[:,good]
    fidx1, fidx2 = common_val_idx(data['fghz'],fghz,precision=4)
    missing = np.setdiff1d(np.arange(len(data['fghz'])),fidx1)
    
    nf, nbl, npol, nt = data['x'].shape
    nf = len(fidx1)
    # Correct data for X-Y delay phase
    for k,bl in enumerate(get_bl_order()):
        i, j = bl
        if i < 14 and j < 14 and i != j:
            a1 = lobe(dph[i,fidx2] - dph[j,fidx2])
            a2 = -dph[j,fidx2] + np.pi/2
            a3 = dph[i,fidx2] - np.pi/2
            data['x'][fidx1,k,1] *= np.repeat(np.exp(1j*a1),nt).reshape(nf,nt)
            data['x'][fidx1,k,2] *= np.repeat(np.exp(1j*a2),nt).reshape(nf,nt) 
            data['x'][fidx1,k,3] *= np.repeat(np.exp(1j*a3),nt).reshape(nf,nt)
    
    # Correct data for differential feed rotation
    cdata = copy.deepcopy(data)
    for n in range(nt):
        for k,bl in enumerate(get_bl_order()):
            i, j = bl
            if i < 14 and j < 14 and i != j:
                dchi = chi[n,i] - chi[n,j]
                cchi = np.cos(dchi)
                schi = np.sin(dchi)
                cdata['x'][:,k,0,n] = data['x'][:,k,0,n]*cchi + data['x'][:,k,3,n]*schi
                cdata['x'][:,k,2,n] = data['x'][:,k,2,n]*cchi + data['x'][:,k,1,n]*schi
                cdata['x'][:,k,3,n] = data['x'][:,k,3,n]*cchi - data['x'][:,k,0,n]*schi
                cdata['x'][:,k,1,n] = data['x'][:,k,1,n]*cchi - data['x'][:,k,2,n]*schi
    # Mask any missing frequencies in the corrected data (a no-op when missing is empty)
    cdata['x'][missing] = np.ma.masked
    return cdata
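A usage sketch chaining apply_attn_corr() above with unrot(); udb_util.py's readXdata() is named in the docstrings, but its call signature and the file path here are assumptions.

# Hypothetical usage of unrot() together with apply_attn_corr().
import udb_util as uu

data = uu.readXdata('/data1/UDB/2019/UDB20190704180000')   # assumed reader and illustrative path
cdata = apply_attn_corr(data)   # gain-state correction
cdata = unrot(cdata)            # differential feed-rotation correction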
Example 29
def DCM_attn_anal(filename):
    ''' Analyze a DCMATTNTEST observation to determine the 2- and 4-bit
        attenuation values.  Input is a Miriad file.  Returns two arrays, 
           at2 and at4 of size (nant,npol) = (13,2)
        representing the attenuation, in dB, of the 2- and 4-bit, resp.
    '''
    import read_idb as ri
    import dbutil as db
    import cal_header as ch
    import stateframe as stf
    import copy
    from util import Time
    import matplotlib.pylab as plt

    out = ri.read_idb([filename])
    ts = int(Time(out['time'][0], format='jd').lv + 0.5)
    te = int(Time(out['time'][-1], format='jd').lv + 0.5)
    query = 'select Timestamp,DCM_Offset_Attn from fV65_vD15 where Timestamp between ' + str(
        ts) + ' and ' + str(te) + ' order by Timestamp'
    cursor = db.get_cursor()
    data, msg = db.do_query(cursor, query)
    cursor.close()
    dcm_offset = data['DCM_Offset_Attn'].reshape(
        len(data['DCM_Offset_Attn']) / 15, 15)
    dcm_offset = dcm_offset[:, 0]  # All antennas are the same
    t = Time(out['time'][0], format='jd')
    xml, buf = ch.read_cal(2, t)
    table = stf.extract(buf, xml['Attenuation'])
    bandlist = ((out['fghz'] - 0.5) * 2).astype(int)
    tbl = table[bandlist - 1]
    tbl.shape = (len(bandlist), 15, 2)
    tbl = np.swapaxes(np.swapaxes(tbl, 0, -1), 0, 1)
    tbl2 = np.broadcast_to(tbl, (out['time'].shape[0], 15, 2, 134))
    tbl = copy.copy(np.rollaxis(tbl2, 0, 4))  # Shape (nant,npol,nf,nt)
    pwr = out['p'][:15]  # Shape (nant,npol,nf,nt)
    # Add value of dcm_offset to table
    for i, offset in enumerate(dcm_offset):
        tbl[:, :, :, i] += offset
    # Clip to valid attenuations
    tbl = np.clip(tbl, 0, 30)
    # Isolate good times in various attn states
    goodm2, = np.where(dcm_offset == -2)
    goodm2 = goodm2[2:-3]
    good2, = np.where(dcm_offset == 2)
    good2 = good2[2:-3]
    good0, = np.where(dcm_offset[goodm2[-1]:good2[0]] == 0)
    good0 += goodm2[-1]
    good0 = good0[2:-3]
    good4, = np.where(dcm_offset == 4)
    good4 = good4[2:-3]
    good6, = np.where(dcm_offset == 6)
    good6 = good6[2:-3]
    goodbg = good6 + 30  # Assumes FEMATTN 15 follows good6 30 s later
    # Perform median over good times and create pwrmed with medians
    # The 5 indexes correspond to dcm_offsets -2, 0, 2, 4 and 6
    nant, npol, nf, nt = pwr.shape
    pwrmed = np.zeros((nant, npol, nf, 5))
    # Do not forget to subtract the background
    bg = np.median(pwr[:, :, :, goodbg], 3)
    pwrmed[:, :, :, 0] = np.median(pwr[:, :, :, goodm2], 3) - bg
    pwrmed[:, :, :, 1] = np.median(pwr[:, :, :, good0], 3) - bg
    pwrmed[:, :, :, 2] = np.median(pwr[:, :, :, good2], 3) - bg
    pwrmed[:, :, :, 3] = np.median(pwr[:, :, :, good4], 3) - bg
    pwrmed[:, :, :, 4] = np.median(pwr[:, :, :, good6], 3) - bg
    good = np.array([goodm2[0], good0[0], good2[0], good4[0], good6[0]])
    tbl = tbl[:, :, :, good]
    at2 = np.zeros((13, 2), float)
    at4 = np.zeros((13, 2), float)
    at8 = np.zeros((13, 2), float)
    f1, ax1 = plt.subplots(2, 13)
    f2, ax2 = plt.subplots(2, 13)
    f3, ax3 = plt.subplots(2, 13)
    for ant in range(13):
        for pol in range(2):
            pts = []
            for i in range(4):
                for v in [0, 4, 8, 12, 16, 20, 24, 28]:
                    idx, = np.where(tbl[ant, pol, :, i] == v)
                    if len(idx) != 0:
                        good, = np.where((tbl[ant, pol, idx, i] +
                                          2) == tbl[ant, pol, idx, i + 1])
                        if len(good) != 0:
                            pts.append(pwrmed[ant, pol, idx[good], i] /
                                       pwrmed[ant, pol, idx[good], i + 1])
            pts = np.concatenate(pts)
            ax1[pol, ant].plot(pts, '.')
            ax1[pol, ant].set_ylim(0, 2)
            at2[ant, pol] = np.log10(np.median(pts)) * 10.
            pts = []
            for i in range(3):
                for v in [0, 2, 8, 10, 16, 18, 24, 26]:
                    idx, = np.where(tbl[ant, pol, :, i] == v)
                    if len(idx) != 0:
                        good, = np.where((tbl[ant, pol, idx, i] +
                                          4) == tbl[ant, pol, idx, i + 2])
                        if len(good) != 0:
                            pts.append(pwrmed[ant, pol, idx[good], i] /
                                       pwrmed[ant, pol, idx[good], i + 2])
            pts = np.concatenate(pts)
            ax2[pol, ant].plot(pts, '.')
            ax2[pol, ant].set_ylim(0, 3)
            at4[ant, pol] = np.log10(np.median(pts)) * 10.
            pts = []
            i = 0
            for v in [0, 2, 4, 6, 16, 18, 20, 22]:
                idx, = np.where(tbl[ant, pol, :, i] == v)
                if len(idx) != 0:
                    good, = np.where((tbl[ant, pol, idx, i] + 8) == tbl[ant, pol, idx, i + 4])
                    if len(good) != 0:
                        pts.append(pwrmed[ant, pol, idx[good], i] /
                                   pwrmed[ant, pol, idx[good], i + 4])
            try:
                pts = np.concatenate(pts)
            except:
                # Looks like there were no points for this antenna/polarization, so set to nominal attn
                pts = [6.30957, 6.30957, 6.30957]
            ax3[pol, ant].plot(pts, '.')
            ax3[pol, ant].set_ylim(5, 8)
            at8[ant, pol] = np.log10(np.median(pts)) * 10.
    plt.show()
    # Generate output table, a complex array of size (nant,npol,nbits)
    attn = np.zeros((16, 2, 4), np.complex)
    # Set to nominal values, then overwrite with measured ones
    for i in range(16):
        for j in range(2):
            attn[i, j] = [2.0 + 0j, 4.0 + 0j, 8.0 + 0j, 16.0 + 0j]
    attn[:13, :, 0] = at2 + 0j
    attn[:13, :, 1] = at4 + 0j
    attn[:13, :, 2] = at8 + 0j
    return attn
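The core of the fit above is the ratio-to-dB step: when two DCM states differ only by a nominal 2 dB (or 4, 8 dB) bit, the measured value of that bit is 10*log10 of the ratio of the background-subtracted powers in the two states. A minimal standalone sketch of that step with synthetic power levels (not EOVSA data):

import numpy as np

def bit_attn_db(p_low, p_high):
    """Measured attenuation (dB) implied by powers in a low- and a high-attenuation state."""
    return 10. * np.log10(np.median(np.asarray(p_low) / np.asarray(p_high)))

# Synthetic example: a "2-dB" bit that really attenuates by 2.15 dB
true_db = 2.15
p_low = np.full(100, 5000.)               # power with the bit off
p_high = p_low * 10**(-true_db / 10.)     # power with the bit on
print(bit_attn_db(p_low, p_high))         # ~2.15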
Example no. 30
0
def DCM_master_attn_cal(update=False):
    ''' New version of this command, which uses the power values in
        the 10gbe packet headers instead of the very slow measurement
        of the ADC levels themselves.  This version only takes about 8 s!
        
        If update is True, it writes the results to the SQL database.
        
        Returns the DCM_master_table in the form of lines of text
        strings, with labels (handy for viewing).
    '''
    import pcapture2 as p
    import dbutil as db
    import cal_header as ch
    import stateframe as stf
    pwr = np.zeros((50,8,4),'int')
    # Capture on eth2 interface
    command = 'tcpdump -i eth2 -c 155000 -w /home/user/Python/dcm2.pcap -s 1000'
    p.sendcmd(command)
    # Capture on eth3 interface
    command = 'tcpdump -i eth3 -c 155000 -w /home/user/Python/dcm3.pcap -s 1000'
    p.sendcmd(command)
    headers = p.list_header('/home/user/Python/dcm2.pcap')
    for line in headers:
        try:
            j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
            pwr[j,id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    headers = p.list_header('/home/user/Python/dcm3.pcap')
    for line in headers:
        try:
            j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
            pwr[j,id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    # Reshape to (slot, nant, npol)
    pwr.shape = (50,16,2)
    # Read current frequency sequence from database
    cursor = db.get_cursor()
    query = 'select top 50 FSeqList from hV37_vD50 order by Timestamp desc'
    fseq, msg = db.do_query(cursor, query)
    if msg == 'Success':
        fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
        bandlist = ((np.array(fseqlist)-0.44)*2).astype(int)
    cursor.close()
    # Read current DCM_master_table from database
    xml, buf = ch.read_cal(2)
    orig_table = stf.extract(buf,xml['Attenuation'])
    # Order pwr values according to bandlist, taking median of any repeated values
    new_pwr = np.zeros((34,16,2))
    for i in range(34):
        idx, = np.where(bandlist-1 == i)
        if len(idx) > 0:
            new_pwr[i] = np.median(pwr[idx],0)
    new_pwr.shape = (34,32)
    # Now determine the change in attenuation needed to achieve a target
    # value of 1600.  Eliminate last two entries, corresponding to Ant16
    attn = np.log10(new_pwr[:,:-2]/1600.)*10.
    new_table = (np.clip(orig_table + attn,0,30)/2).astype(int)*2
    DCMlines = []
    DCMlines.append('#      Ant1  Ant2  Ant3  Ant4  Ant5  Ant6  Ant7  Ant8  Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15')
    DCMlines.append('#      X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y')
    DCMlines.append('#     ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----')
    for band in range(1,35):
        DCMlines.append('{:2} :  {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'.format(band,*new_table[band-1]))
    if update:
        msg = ch.dcm_master_table2sql(DCMlines)
        if msg:
            print 'Success'
        else:
            print 'Error writing table to SQL database!'
    return DCMlines
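The key arithmetic above is converting a measured packet-header power into the DCM attenuation change that would bring it to the target of 1600 counts, then clipping to the 0-30 dB range and truncating to the even 2 dB steps the DCM accepts. A small sketch of just that step with made-up numbers:

import numpy as np

target = 1600.
orig_attn = np.array([10, 12, 14, 16])              # current DCM settings in dB (made-up)
measured = np.array([3200., 1600., 800., 6400.])    # packet-header powers (made-up)

# dB change needed: positive if the measured power is above target
delta = 10. * np.log10(measured / target)
# New setting: clip to 0-30 dB and truncate to an even number of dB
new_attn = (np.clip(orig_attn + delta, 0, 30) / 2).astype(int) * 2
print(new_attn)                                     # -> [12 12 10 22]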
Example no. 31
0
def sql2phacalX(trange, *args, **kwargs):
    '''Supply a timestamp in Time format, return the closest phacal data.
        If a time range is provided, return records within the time range.'''
    import cal_header as ch
    import stateframe as stf
    xml, bufs = ch.read_calX(9, t=trange, *args, **kwargs)
    if isinstance(bufs, list):
        phacals = []
        for i, buf in enumerate(bufs):
            try:
                phacal_flag = stf.extract(buf, xml['Phacal_Flag'])
                fghz = stf.extract(buf, xml['Fghz'])
                sigma = stf.extract(buf, xml['Phacal_Sigma'])
                timestamp = Time(stf.extract(buf, xml['Timestamp']), format='lv')
                tbg = Time(stf.extract(buf, xml['T_beg']), format='lv')
                ted = Time(stf.extract(buf, xml['T_end']), format='lv')
                pha = stf.extract(buf, xml['Phacal_Pha'])
                amp = stf.extract(buf, xml['Phacal_Amp'])
                tmp = stf.extract(buf, xml['MBD'])
                poff, pslope = tmp[:, :, 0], tmp[:, :, 1]
                flag = stf.extract(buf, xml['Flag'])[:, :, 0]
                t_ref = Time(stf.extract(buf, xml['T_refcal']), format='lv')
                phacals.append(
                    {'pslope': pslope, 't_pha': timestamp, 'flag': flag, 'poff': poff, 't_ref': t_ref,
                     'phacal': {'pha': pha, 'amp': amp, 'flag': phacal_flag, 'fghz': fghz, 'sigma': sigma,
                                'timestamp': timestamp,
                                't_bg': tbg,
                                't_ed': ted}})
            except:
                print 'failed to load record {} ---> {}'.format(i + 1, Time(stf.extract(buf, xml['Timestamp']),
                                                                            format='lv').iso)
        return phacals
    elif isinstance(bufs, str):
        phacal_flag = stf.extract(bufs, xml['Phacal_Flag'])
        fghz = stf.extract(bufs, xml['Fghz'])
        sigma = stf.extract(bufs, xml['Phacal_Sigma'])
        timestamp = Time(stf.extract(bufs, xml['Timestamp']), format='lv')
        tbg = Time(stf.extract(bufs, xml['T_beg']), format='lv')
        ted = Time(stf.extract(bufs, xml['T_end']), format='lv')
        pha = stf.extract(bufs, xml['Phacal_Pha'])
        amp = stf.extract(bufs, xml['Phacal_Amp'])
        tmp = stf.extract(bufs, xml['MBD'])
        poff, pslope = tmp[:, :, 0], tmp[:, :, 1]
        flag = stf.extract(bufs, xml['Flag'])[:, :, 0]
        t_ref = Time(stf.extract(bufs, xml['T_refcal']), format='lv')
        return {'pslope': pslope, 't_pha': timestamp, 'flag': flag, 'poff': poff, 't_ref': t_ref,
                'phacal': {'pha': pha, 'amp': amp, 'flag': phacal_flag, 'fghz': fghz, 'sigma': sigma,
                           'timestamp': timestamp,
                           't_bg': tbg,
                           't_ed': ted}}
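Each record returned above stores the phase-cal amplitude and phase as separate arrays; to use them as a complex correction you would typically recombine them. A minimal sketch of that recombination with synthetic stand-in arrays (the shapes are illustrative, not taken from a real record):

import numpy as np

# Stand-ins for a record's phacal['amp'] and phacal['pha'] arrays
amp = np.ones((13, 2, 34))
pha = np.full((13, 2, 34), 0.3)

gain = amp * np.exp(1j * pha)       # complex phase-cal gain per antenna/pol/band
print(gain.shape)                   # (13, 2, 34)
print(np.angle(gain[0, 0, 0]))      # 0.3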
Example no. 32
0
def apply_gain_corr(data, tref=None):
    ''' Applies the gain_state() corrections to the given data dictionary,
        corrected to the gain-state at time given by Time() object tref.
        
        Inputs:
          data     A dictionary such as that returned by read_idb().
          tref     A Time() object with the reference time, or if None,
                     the gain state of the nearest earlier REFCAL is 
                     used.
        Output:
          cdata    A dictionary with the gain-corrected data.  The keys
                     p, x, p2, and a are all updated.
    '''
    from util import common_val_idx, nearest_val_idx
    import copy
    import read_idb as ri
    import cal_header as ch
    import stateframe as stf
    if tref is None:
        # No reference time specified, so get nearest earlier REFCAL
        trange = Time(data['time'][[0, -1]], format='jd')
        xml, buf = ch.read_cal(8, t=trange[0])
        if xml == {}:
            # No refcal for this date, so just use an early time as reference
            tref = Time(trange[0].iso[:10] + ' 13:30')
        else:
            tref = Time(stf.extract(buf, xml['Timestamp']), format='lv')
    # Get the gain state at the reference time (actually median over 1 minute)
    trefrange = Time([tref.iso, Time(tref.lv + 61, format='lv').iso])
    ref_gs = get_gain_state(trefrange)  # refcal gain state for 60 s
    # Get median of refcal gain state (which should be constant anyway)
    ref_gs['h1'] = np.median(ref_gs['h1'], 1)
    ref_gs['h2'] = np.median(ref_gs['h2'], 1)
    ref_gs['v1'] = np.median(ref_gs['v1'], 1)
    ref_gs['v2'] = np.median(ref_gs['v2'], 1)

    # Get timerange from data
    trange = Time([data['time'][0], data['time'][-1]], format='jd')
    # Get time cadence
    dt = np.int(
        np.round(np.median(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1: dt = None
    # Get the gain state of the requested timerange
    src_gs = get_gain_state(trange,
                            dt)  # solar gain state for timerange of file
    nt = len(src_gs['times'])
    antgain = np.zeros((15, 2, 34, nt),
                       np.float32)  # Antenna-based gains vs. band
    for i in range(15):
        for j in range(34):
            antgain[i, 0, j] = (src_gs['h1'][i] + src_gs['h2'][i] - ref_gs['h1'][i] - ref_gs['h2'][i]
                                + src_gs['dcmattn'][i, 0, j] - ref_gs['dcmattn'][i, 0, j])
            antgain[i, 1, j] = (src_gs['v1'][i] + src_gs['v2'][i] - ref_gs['v1'][i] - ref_gs['v2'][i]
                                + src_gs['dcmattn'][i, 1, j] - ref_gs['dcmattn'][i, 1, j])

    cdata = copy.deepcopy(data)
    # Frequency list is provided, so produce baseline-based gain table as well
    # Create giant array of gains, translated to baselines and frequencies
    fghz = data['fghz']
    nf = len(fghz)
    blist = (fghz * 2 - 1).astype(int) - 1
    blgain = np.zeros((120, 4, nf, nt),
                      float)  # Baseline-based gains vs. frequency
    for i in range(14):
        for j in range(i + 1, 15):
            blgain[ri.bl2ord[i, j], 0] = 10**((antgain[i, 0, blist] + antgain[j, 0, blist]) / 20.)
            blgain[ri.bl2ord[i, j], 1] = 10**((antgain[i, 1, blist] + antgain[j, 1, blist]) / 20.)
            blgain[ri.bl2ord[i, j], 2] = 10**((antgain[i, 0, blist] + antgain[j, 1, blist]) / 20.)
            blgain[ri.bl2ord[i, j], 3] = 10**((antgain[i, 1, blist] + antgain[j, 0, blist]) / 20.)
    antgainf = 10**(antgain[:, :, blist] / 10.)

    #idx1, idx2 = common_val_idx(data['time'],src_gs['times'].jd)
    idx = nearest_val_idx(data['time'], src_gs['times'].jd)
    # Apply corrections (some times may be eliminated from the data)
    # Correct the cross-correlation data
    cdata['x'] *= blgain[:, :, :, idx]
    # Correct the power
    cdata['p'][:15] *= antgainf[:, :, :, idx]
    # Correct the autocorrelation
    cdata['a'][:15, :2] *= antgainf[:, :, :, idx]
    cross_fac = np.sqrt(antgainf[:, 0] * antgainf[:, 1])
    cdata['a'][:15, 2] *= cross_fac[:, :, idx]
    cdata['a'][:15, 3] *= cross_fac[:, :, idx]
    # Correct the power-squared -- this should preserve SK
    cdata['p2'][:15] *= antgainf[:, :, :, idx]**2
    # Remove any uncorrected times before returning
    #cdata['time'] = cdata['time'][idx1]
    #cdata['p'] = cdata['p'][:,:,:,idx1]
    #cdata['a'] = cdata['a'][:,:,:,idx1]
    #cdata['p2'] = cdata['p2'][:,:,:,idx1]
    #cdata['ha'] = cdata['ha'][idx1]
    #cdata['m'] = cdata['m'][:,:,:,idx1]
    return cdata
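A detail worth noting above: the antenna-based dB corrections are converted with 10**(x/20) for the cross-correlations (voltage-like, one factor per antenna of the baseline) but with 10**(x/10) for the single-antenna powers. A self-contained sketch of that conversion with toy numbers:

import numpy as np

g = np.array([2.0, 4.0])              # per-antenna gain differences in dB (toy values)

# Power-like correction for each antenna (total power, autocorrelation): 10**(dB/10)
p_fac = 10**(g / 10.)
# Cross-correlation correction for the baseline combines both antennas: 10**((dB_i + dB_j)/20)
bl_fac = 10**((g[0] + g[1]) / 20.)
print(p_fac)                          # [1.585 2.512]
print(bl_fac)                         # 1.995
print(np.sqrt(p_fac[0] * p_fac[1]))   # same value: the /20 factor is the voltage-like gain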
Example no. 33
0
def sql2refcalX(trange, lohi=False, *args, **kwargs):
    '''Same as sql2refcal. trange can be either a timestamp or a timerange.'''
    import cal_header as ch
    import stateframe as stf
    if lohi:
        caltype = 12
    else:
        caltype = 8
    xml, bufs = ch.read_calX(caltype, t=trange, *args, **kwargs)
    if isinstance(bufs, list):
        refcals = []
        for i, buf in enumerate(bufs):
            try:
                ref = stf.extract(buf, xml['Refcal_Real']) + stf.extract(
                    buf, xml['Refcal_Imag']) * 1j
                flag = stf.extract(buf, xml['Refcal_Flag'])
                fghz = stf.extract(buf, xml['Fghz'])
                sigma = stf.extract(buf, xml['Refcal_Sigma'])
                timestamp = Time(stf.extract(buf, xml['Timestamp']),
                                 format='lv')
                tbg = Time(stf.extract(buf, xml['T_beg']), format='lv')
                ted = Time(stf.extract(buf, xml['T_end']), format='lv')
                pha = np.angle(ref)
                amp = np.absolute(ref)
                refcals.append({
                    'pha': pha,
                    'amp': amp,
                    'flag': flag,
                    'fghz': fghz,
                    'sigma': sigma,
                    'timestamp': timestamp,
                    't_bg': tbg,
                    't_ed': ted
                })
            except:
                print 'failed to load record {} ---> {}'.format(
                    i + 1,
                    Time(stf.extract(buf, xml['Timestamp']), format='lv').iso)
        return refcals
    elif isinstance(bufs, str):
        refcal = stf.extract(bufs, xml['Refcal_Real']) + stf.extract(
            bufs, xml['Refcal_Imag']) * 1j
        flag = stf.extract(bufs, xml['Refcal_Flag'])
        fghz = stf.extract(bufs, xml['Fghz'])
        sigma = stf.extract(bufs, xml['Refcal_Sigma'])
        timestamp = Time(stf.extract(bufs, xml['Timestamp']), format='lv')
        tbg = Time(stf.extract(bufs, xml['T_beg']), format='lv')
        ted = Time(stf.extract(bufs, xml['T_end']), format='lv')
        pha = np.angle(refcal)
        amp = np.absolute(refcal)
        return {
            'pha': pha,
            'amp': amp,
            'flag': flag,
            'fghz': fghz,
            'sigma': sigma,
            'timestamp': timestamp,
            't_bg': tbg,
            't_ed': ted
        }
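The record assembly above is simply re-forming a complex reference calibration from its stored real and imaginary parts and deriving amplitude and phase. A tiny sketch of the same arithmetic on synthetic numbers:

import numpy as np

re = np.array([1.0, 0.0])     # stand-in for the stored Refcal_Real values
im = np.array([1.0, -2.0])    # stand-in for the stored Refcal_Imag values
ref = re + im * 1j
print(np.absolute(ref))       # [1.414  2.   ]
print(np.angle(ref))          # [ 0.785 -1.571]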
Example no. 34
0
def apply_attn_corr(data, tref=None):
    ''' Applies the attenuator state corrections to the given data dictionary,
        corrected to the gain-state at time given by Time() object tref.
        
        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          tref     A Time() object with the reference time, or if None,
                     the gain state of the nearest earlier REFCAL is 
                     used.
        Output:
          cdata    A dictionary with the gain-corrected data.  The keys
                     px, py, and x, are updated.
                     
        NB: This is the same routine as in gaincal2.py, but modified
        to handle the different ordering/format of data from udb_util.py's
        readXdata() routine.
    '''
    from gaincal2 import get_gain_state
    from util import common_val_idx, nearest_val_idx
    import copy
    import cal_header as ch
    import stateframe
    if tref is None:
        # No reference time specified, so get nearest earlier REFCAL
        trange = Time(data['time'][[0, -1]], format='jd')
        xml, buf = ch.read_cal(8, t=trange[0])
        tref = Time(stateframe.extract(buf, xml['Timestamp']), format='lv')
    # Get the gain state at the reference time (actually median over 1 minute)
    trefrange = Time([tref.iso, Time(tref.lv + 60, format='lv').iso])
    ref_gs = get_gain_state(trefrange)  # refcal gain state for 60 s
    # Get median of refcal gain state (which should be constant anyway)
    ref_gs['h1'] = np.median(ref_gs['h1'], 1)
    ref_gs['h2'] = np.median(ref_gs['h2'], 1)
    ref_gs['v1'] = np.median(ref_gs['v1'], 1)
    ref_gs['v2'] = np.median(ref_gs['v2'], 1)

    # Get timerange from data
    trange = Time([data['time'][0], data['time'][-1]], format='jd')
    # Get time cadence
    dt = np.int(
        np.round(np.median(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1: dt = None
    # Get the gain state of the requested timerange
    src_gs = get_gain_state(trange,
                            dt)  # solar gain state for timerange of file
    nt = len(src_gs['times'])
    antgain = np.zeros((15, 2, 34, nt),
                       np.float32)  # Antenna-based gains vs. band
    for i in range(15):
        for j in range(34):
            antgain[i, 0, j] = (src_gs['h1'][i] + src_gs['h2'][i] - ref_gs['h1'][i] - ref_gs['h2'][i]
                                + src_gs['dcmattn'][i, 0, j] - ref_gs['dcmattn'][i, 0, j])
            antgain[i, 1, j] = (src_gs['v1'][i] + src_gs['v2'][i] - ref_gs['v1'][i] - ref_gs['v2'][i]
                                + src_gs['dcmattn'][i, 1, j] - ref_gs['dcmattn'][i, 1, j])

    cdata = copy.deepcopy(data)
    # Create giant array of baseline-based gains, translated to baselines and frequencies
    fghz = data['fghz']
    nf = len(fghz)
    blist = (fghz * 2 - 1).astype(
        int) - 1  # Band list corresponding to frequencies in data
    blgain = np.zeros((nf, 136, 4, nt),
                      float)  # Baseline-based gains vs. frequency
    for k, bl in enumerate(get_bl_order()):
        i, j = bl
        if i < 15 and j < 15:
            blgain[:, k, 0] = 10**((antgain[i, 0, blist] + antgain[j, 0, blist]) / 20.)
            blgain[:, k, 1] = 10**((antgain[i, 1, blist] + antgain[j, 1, blist]) / 20.)
            blgain[:, k, 2] = 10**((antgain[i, 0, blist] + antgain[j, 1, blist]) / 20.)
            blgain[:, k, 3] = 10**((antgain[i, 1, blist] + antgain[j, 0, blist]) / 20.)
    # Reorder antgain axes to put frequencies in first slot, to match data
    antgain = np.swapaxes(np.swapaxes(antgain, 1, 2), 0, 1)
    antgainf = 10**(antgain[blist] / 10.)

    idx = nearest_val_idx(data['time'], src_gs['times'].jd)
    # Correct the auto- and cross-correlation data
    cdata['x'] *= blgain[:, :, :, idx]
    # Reshape px and py arrays
    cdata['px'].shape = (134, 16, 3, nt)
    cdata['py'].shape = (134, 16, 3, nt)
    # Correct the power
    cdata['px'][:, :15, 0] *= antgainf[:, :, 0, idx]
    cdata['py'][:, :15, 0] *= antgainf[:, :, 1, idx]
    # Correct the power-squared
    cdata['px'][:, :15, 1] *= antgainf[:, :, 0, idx]**2
    cdata['py'][:, :15, 1] *= antgainf[:, :, 1, idx]**2
    # Reshape px and py arrays back to original
    cdata['px'].shape = (134 * 16 * 3, nt)
    cdata['py'].shape = (134 * 16 * 3, nt)
    return cdata
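The one-liner computing blist above converts each observing frequency in GHz into a zero-based index into the 34-band tables (the same mapping used elsewhere in this collection as ((fghz - 0.5) * 2)). A small sketch of the mapping, assuming a few representative frequencies:

import numpy as np

fghz = np.array([1.1, 2.4, 3.0, 9.8, 13.5])   # example frequencies in GHz (assumed values)
blist = (fghz * 2 - 1).astype(int) - 1        # zero-based index into the 34-band tables
print(blist + 1)                              # one-based band numbers -> [ 1  3  5 18 26]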
Example no. 35
0
def unrot(data, azeldict=None):
    ''' Apply the correction to differential feed rotation to data, and return
        the corrected data.

        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          azeldict The dictionary returned from get_sql_info(), or if None, the appropriate
                     get_sql_info() call is done internally.

        Output:
          cdata    A dictionary with the phase-corrected data.  Only the key
                     x is updated.
    '''
    import copy
    from util import lobe
    trange = Time(data['time'][[0, -1]], format='jd')

    if azeldict is None:
        azeldict = get_sql_info(trange)
    chi = azeldict['ParallacticAngle']  # (nt, nant)
    # Correct parallactic angle for equatorial mounts, relative to Ant14
    for i in [8, 9, 10, 12, 13]:
        chi[:, i] -= chi[:, 13]

    # Ensure that nearest valid parallactic angle is used for times in the data
    good, = np.where(azeldict['ActualAzimuth'][0] != 0)
    tidx = nearest_val_idx(data['time'], azeldict['Time'][good].jd)

    # Read X-Y Delay phase from SQL database and get common frequencies
    xml, buf = ch.read_cal(11, t=trange[0])
    fghz = stateframe.extract(buf, xml['FGHz'])
    good, = np.where(fghz != 0.)
    fghz = fghz[good]
    dph = stateframe.extract(buf, xml['XYphase'])
    dph = dph[:, good]
    fidx1, fidx2 = common_val_idx(data['fghz'], fghz, precision=4)
    missing = np.setdiff1d(np.arange(len(data['fghz'])), fidx1)

    nf, nbl, npol, nt = data['x'].shape
    nf = len(fidx1)
    # Correct data for X-Y delay phase
    for k, bl in enumerate(get_bl_order()):
        i, j = bl
        if i < 14 and j < 14 and i != j:
            a1 = lobe(dph[i, fidx2] - dph[j, fidx2])
            a2 = -dph[j, fidx2] + np.pi / 2
            a3 = dph[i, fidx2] - np.pi / 2
            data['x'][fidx1, k, 1] *= np.repeat(np.exp(1j * a1),
                                                nt).reshape(nf, nt)
            data['x'][fidx1, k, 2] *= np.repeat(np.exp(1j * a2),
                                                nt).reshape(nf, nt)
            data['x'][fidx1, k, 3] *= np.repeat(np.exp(1j * a3),
                                                nt).reshape(nf, nt)

    # Correct data for differential feed rotation
    cdata = copy.deepcopy(data)
    for n in range(nt):
        for k, bl in enumerate(get_bl_order()):
            i, j = bl
            if i < 14 and j < 14 and i != j:
                dchi = chi[n, i] - chi[n, j]
                cchi = np.cos(dchi)
                schi = np.sin(dchi)
                cdata['x'][:, k, 0, n] = data['x'][:, k, 0, n] * cchi + data['x'][:, k, 3, n] * schi
                cdata['x'][:, k, 2, n] = data['x'][:, k, 2, n] * cchi + data['x'][:, k, 1, n] * schi
                cdata['x'][:, k, 3, n] = data['x'][:, k, 3, n] * cchi - data['x'][:, k, 0, n] * schi
                cdata['x'][:, k, 1, n] = data['x'][:, k, 1, n] * cchi - data['x'][:, k, 2, n] * schi
    # Set flags for any missing frequencies (hopefully this also works when missing is np.array([]))
    cdata['x'][missing] = np.ma.masked
    return cdata
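The feed-rotation block above writes the rotated products into a deep copy (cdata) while reading from the original data; the copy matters because each output polarization product mixes two of the input products, so an in-place update would reuse values that were already overwritten. A standalone numpy sketch of the same mixing for a single baseline, frequency and time (toy values; the product ordering is whatever the loop above indexes):

import numpy as np

x = np.array([1 + 0j, 0 + 1j, 2 - 1j, 0.5 + 0.5j])  # four polarization products of one sample (toy)
dchi = 0.2                                           # differential feed/parallactic angle in radians
c, s = np.cos(dchi), np.sin(dchi)

out = np.empty_like(x)     # write into a separate array, as unrot() writes into the copy cdata
out[0] = x[0] * c + x[3] * s
out[2] = x[2] * c + x[1] * s
out[3] = x[3] * c - x[0] * s
out[1] = x[1] * c - x[2] * s
print(out)                 # updating x in place would corrupt the inputs to out[3] and out[1]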
Example no. 36
0
def DCM_master_attn_cal(fseqfile=None, dcmattn=None, update=False):
    ''' New version of this command, which uses the power values in
        the 10gbe packet headers instead of the very slow measurement
        of the ADC levels themselves.  This version only takes about 8 s!
        
        If update is True, it writes the results to the SQL database.
        
        Returns the DCM_master_table in the form of lines of text
        strings, with labels (handy for viewing).
    '''
    import pcapture2 as p
    import dbutil as db
    import cal_header as ch
    import stateframe as stf
    bandlist = fseqfile2bandlist(fseqfile)
    if bandlist is None:
        print 'Must specify a frequency sequence.'
        return None

    # Make sure this sequence is actually running, or start it if not
    accini = stf.rd_ACCfile()
    if not fseq_is_running(fseqfile, accini):
        # Sequence is not running, so send ACC commands to start it
        send_cmds(['FSEQ-OFF'], accini)
        send_cmds(['FSEQ-INIT'], accini)
        send_cmds(['FSEQ-FILE ' + fseqfile], accini)
        send_cmds(['FSEQ-ON'], accini)
        bandlist2dcmtable(bandlist, toACC=True)
        time.sleep(3)
        if not fseq_is_running(fseqfile, accini):
            print 'Frequency sequence could not be started.'
            return None
        else:
            print 'Successfully started frequency sequence.'
        send_cmds(['dcmtable dcm.txt'], accini)
        send_cmds(['dcmauto-on'], accini)

    pwr = np.zeros((50, 8, 4), 'int')
    # Capture on eth2 interface
    command = 'tcpdump -i eth2 -c 155000 -w /home/user/Python/dcm2.pcap -s 1000'
    p.sendcmd(command)
    # Capture on eth3 interface
    command = 'tcpdump -i eth3 -c 155000 -w /home/user/Python/dcm3.pcap -s 1000'
    p.sendcmd(command)
    headers = p.list_header('/home/user/Python/dcm2.pcap')
    for line in headers:
        try:
            j, id, p1, p2, p3, p4 = np.array(map(
                int, line.split()))[[0, 3, 6, 7, 8, 9]]
            pwr[j, id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    headers = p.list_header('/home/user/Python/dcm3.pcap')
    for line in headers:
        try:
            j, id, p1, p2, p3, p4 = np.array(map(
                int, line.split()))[[0, 3, 6, 7, 8, 9]]
            pwr[j, id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    # Reshape to (slot, nant, npol)
    pwr.shape = (50, 16, 2)
    #    # Read current frequency sequence from database
    #    cursor = db.get_cursor()
    #    query = 'select top 50 FSeqList from hV37_vD50 order by Timestamp desc'
    #    fseq, msg = db.do_query(cursor, query)
    #    if msg == 'Success':
    #        fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
    #        bandlist = ((np.array(fseqlist)-0.44)*2).astype(int)
    #    cursor.close()
    if dcmattn is None:
        # Read current DCM_master_table from database
        xml, buf = ch.read_cal(2)
        orig_table = stf.extract(buf, xml['Attenuation'])
    else:
        # DCM attenuation is set to a constant value so create a table of such values.
        orig_table = np.zeros((34, 30)) + dcmattn
        orig_table[:, 26:] = 0
    # Order pwr values according to bandlist, taking median of any repeated values
    new_pwr = np.zeros((34, 16, 2))
    for i in range(34):
        idx, = np.where(bandlist - 1 == i)
        if len(idx) > 0:
            new_pwr[i] = np.median(pwr[idx], 0)
    new_pwr.shape = (34, 32)
    # Now determine the change in attenuation needed to achieve a target
    # value of 1600.  Eliminate last two entries, corresponding to Ant16
    attn = np.log10(new_pwr[:, :-2] / 1600.) * 10.
    new_table = (np.clip(orig_table + attn, 0, 30) / 2).astype(int) * 2
    DCMlines = []
    DCMlines.append(
        '#      Ant1  Ant2  Ant3  Ant4  Ant5  Ant6  Ant7  Ant8  Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15'
    )
    DCMlines.append(
        '#      X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y'
    )
    DCMlines.append(
        '#     ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----'
    )
    for band in range(1, 35):
        DCMlines.append(
            '{:2} :  {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'
            .format(band, *new_table[band - 1]))
    if update:
        msg = ch.dcm_master_table2sql(DCMlines)
        if msg:
            print 'Success'
        else:
            print 'Error writing table to SQL database!'
    return DCMlines
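One step above that is easy to gloss over is the regrouping of the 50 tuning slots onto the 34 bands: when a band repeats in the frequency sequence, the median of its power readings is kept. A sketch of that regrouping with a toy band list and one reading per slot:

import numpy as np

bandlist = np.array([1, 2, 2, 3, 2])             # band number for each slot (toy sequence)
pwr = np.array([100., 200., 220., 300., 210.])   # one power reading per slot (toy values)

new_pwr = np.zeros(34)
for i in range(34):
    idx, = np.where(bandlist - 1 == i)
    if len(idx) > 0:
        new_pwr[i] = np.median(pwr[idx], 0)      # median over repeated occurrences of a band
print(new_pwr[:4])                               # -> [100. 210. 300.   0.]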
Example no. 37
0
def sql2refcalX(trange, *args, **kwargs):
    '''Same as sql2refcal. trange can be either a timestamp or a timerange.'''
    import cal_header as ch
    import stateframe as stf
    xml, bufs = ch.read_calX(8, t=trange, *args, **kwargs)
    if isinstance(bufs, list):
        refcals = []
        for i, buf in enumerate(bufs):
            try:
                ref = stf.extract(buf, xml['Refcal_Real']) + stf.extract(buf, xml['Refcal_Imag']) * 1j
                flag = stf.extract(buf, xml['Refcal_Flag'])
                fghz = stf.extract(buf, xml['Fghz'])
                sigma = stf.extract(buf, xml['Refcal_Sigma'])
                timestamp = Time(stf.extract(buf, xml['Timestamp']), format='lv')
                tbg = Time(stf.extract(buf, xml['T_beg']), format='lv')
                ted = Time(stf.extract(buf, xml['T_end']), format='lv')
                pha = np.angle(ref)
                amp = np.absolute(ref)
                refcals.append(
                    {'pha': pha, 'amp': amp, 'flag': flag, 'fghz': fghz, 'sigma': sigma, 'timestamp': timestamp,
                     't_bg': tbg,
                     't_ed': ted})
            except:
                print 'failed to load record {} ---> {}'.format(i + 1, Time(stf.extract(buf, xml['Timestamp']),
                                                                            format='lv').iso)
        return refcals
    elif isinstance(bufs, str):
        refcal = stf.extract(bufs, xml['Refcal_Real']) + stf.extract(bufs, xml['Refcal_Imag']) * 1j
        flag = stf.extract(bufs, xml['Refcal_Flag'])
        fghz = stf.extract(bufs, xml['Fghz'])
        sigma = stf.extract(bufs, xml['Refcal_Sigma'])
        timestamp = Time(stf.extract(bufs, xml['Timestamp']), format='lv')
        tbg = Time(stf.extract(bufs, xml['T_beg']), format='lv')
        ted = Time(stf.extract(bufs, xml['T_end']), format='lv')
        pha = np.angle(refcal)
        amp = np.absolute(refcal)
        return {'pha': pha, 'amp': amp, 'flag': flag, 'fghz': fghz, 'sigma': sigma, 'timestamp': timestamp, 't_bg': tbg,
                't_ed': ted}
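Because ch.read_calX() can hand back either a single buffer or a list of buffers, the function above returns either one dict or a list of dicts. A hedged usage sketch of handling both cases, assuming the EOVSA util module is importable, sql2refcalX is in scope, and the dates shown are only placeholders:

from util import Time

# Single time: the nearest record is expected back as one dict
rec = sql2refcalX(Time('2019-01-01 20:00'))
if isinstance(rec, dict):
    print(rec['timestamp'].iso)
    print(rec['fghz'].shape)

# Time range: a list of dicts, one per stored record, is expected
recs = sql2refcalX(Time(['2019-01-01', '2019-01-08']))
if isinstance(recs, list):
    for r in recs:
        print(r['timestamp'].iso)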
Example no. 38
0
def cal_qual(t=None, savfig=True):
    ''' Check the quality of the total power and gain calibrations for a given date
    '''
    import cal_header as ch
    from stateframe import extract
    import dump_tsys as dt
    import pipeline_cal as pc
    import matplotlib.pylab as plt
    import rstn
    from util import get_idbdir
    import socket
    import read_idb as ri

    if t is None:
        t = Time.now()
    mjd = t.mjd
    # First check whether the total power calibration is current
    caltype = 10
    xml, buf = ch.read_cal(caltype, t=t)
    tp_mjd = Time(extract(buf, xml['SQL_timestamp']), format='lv').mjd
    if mjd - tp_mjd > 0.5:
        print 'CAL_QUAL: Warning, TP Calibration not (yet) available for this date.'
    # Find GCAL scan for this date
    fdb = dt.rd_fdb(Time(mjd, format='mjd'))
    gcidx, = np.where(fdb['PROJECTID'] == 'GAINCALTEST')
    if len(gcidx) == 1:
        datadir = get_idbdir(t) + fdb['FILE'][gcidx][0][3:11] + '/'
        # List of GCAL files
        gcalfile = [datadir + i for i in fdb['FILE'][gcidx]]
    else:
        print 'CAL_QUAL: Warning, no GAINCALTEST scan for this date.  Will try using the GAINCALTEST from previous day.'
        fdb = dt.rd_fdb(Time(mjd - 1, format='mjd'))
        gcidx, = np.where(fdb['PROJECTID'] == 'GAINCALTEST')
        if len(gcidx) == 1:
            datadir = get_idbdir(t)
            # Add date path if on pipeline
            # if datadir.find('eovsa') != -1: datadir += fdb['FILE'][gcidx][0][3:11]+'/'
            host = socket.gethostname()
            if host == 'pipeline': datadir += fdb['FILE'][gcidx][0][3:11] + '/'
            # List of GCAL files
            gcalfile = [datadir + i for i in fdb['FILE'][gcidx]]
        else:
            print 'CAL_QUAL: Error, no GAINCALTEST scan for previous day.'
            return
    # Find SOLPNTCAL scan for this date
    fdb = dt.rd_fdb(Time(mjd, format='mjd'))
    gcidx, = np.where(fdb['PROJECTID'] == 'SOLPNTCAL')
    if len(gcidx) > 0:
        datadir = get_idbdir(t)
        # Add date path if on pipeline
        # if datadir.find('eovsa') != -1: datadir += fdb['FILE'][gcidx][0][3:11]+'/'
        host = socket.gethostname()
        if host == 'pipeline': datadir += fdb['FILE'][gcidx][0][3:11] + '/'
        # List of SOLPNTCAL files
        solpntfile = [datadir + i for i in fdb['FILE'][gcidx]]
    else:
        print 'CAL_QUAL: Error, no SOLPNTCAL scan(s) for this date.'
        return
    files = gcalfile + solpntfile
    outnames = []
    for file in files:
        outnames.append(
            pc.udb_corr(file, calibrate=True, attncal=True, desat=True))
    out = ri.read_idb(outnames, srcchk=False)
    nt = len(out['time'])
    nf = len(out['fghz'])
    tpfac = 500. / nf

    frq, flux = rstn.rd_rstnflux(t)
    s = rstn.rstn2ant(frq, flux, out['fghz'] * 1000., t)
    fluximg = s.repeat(nt).reshape(nf, nt)
    f, ax = plt.subplots(4, 7)
    f.set_size_inches(16, 7, forward=True)
    f.tight_layout(rect=[0.0, 0.0, 1, 0.95])
    ax.shape = (2, 14)
    for i in range(13):
        for j in range(2):
            ax[j, i].imshow(out['p'][i, j],
                            aspect='auto',
                            origin='lower',
                            vmax=np.max(s),
                            vmin=0)
            ax[j, i].plot(np.clip(out['p'][i, j, int(nf / 3.)] / tpfac, 0, nf),
                          linewidth=1)
            ax[j, i].plot(np.clip(out['p'][i, j, int(2 * nf / 3.)] / tpfac, 0,
                                  nf),
                          linewidth=1)
            ax[j, i].set_title('Ant ' + str(i + 1) + [' X Pol', ' Y Pol'][j],
                               fontsize=10)
    for j in range(2):
        ax[j, 13].imshow(fluximg,
                         aspect='auto',
                         origin='lower',
                         vmax=np.max(s),
                         vmin=0)
        ax[j, 13].set_title('RSTN Flux', fontsize=10)
    for i in range(13):
        for j in range(2):
            ax[j, i].plot(np.clip(fluximg[int(nf / 3.)] / tpfac, 0, nf),
                          '--',
                          linewidth=1,
                          color='C0')
            ax[j, i].plot(np.clip(fluximg[int(2 * nf / 3.)] / tpfac, 0, nf),
                          '--',
                          linewidth=1,
                          color='C1')

    f.suptitle('Total Power Calibration Quality for ' + t.iso[:10])
    date = t.iso[:10].replace('-', '')
    if savfig:
        try:
            plt.savefig('/common/webplots/flaremon/daily/' + date[:4] +
                        '/QUAL_' + date + 'TP.png')
        except:
            plt.savefig('/tmp/' + date[:4] + '/QUAL_' + date + 'TP.png')
            print 'The .png file could not be created in the /common/webplots/flaremon/daily/ folder.'
            print 'A copy was created in /tmp/.'

    f, ax = plt.subplots(4, 7)
    f.set_size_inches(16, 7, forward=True)
    f.tight_layout(rect=[0.0, 0.0, 1, 0.95])
    ax.shape = (2, 14)
    for i in range(13):
        for j in range(2):
            ax[j, i].imshow(np.real(out['a'][i, j]),
                            aspect='auto',
                            origin='lower',
                            vmax=np.max(s),
                            vmin=0)
            ax[j, i].plot(np.clip(np.real(out['a'][i, j, int(nf / 3.)] / tpfac), 0, nf),
                          linewidth=1)
            ax[j, i].plot(np.clip(np.real(out['a'][i, j, int(2 * nf / 3.)] / tpfac), 0, nf),
                          linewidth=1)
            ax[j, i].set_title('Ant ' + str(i + 1) + [' X Pol', ' Y Pol'][j],
                               fontsize=10)
    for j in range(2):
        ax[j, 13].imshow(fluximg,
                         aspect='auto',
                         origin='lower',
                         vmax=np.max(s),
                         vmin=0)
        ax[j, 13].set_title('RSTN Flux', fontsize=10)
    for i in range(13):
        for j in range(2):
            ax[j, i].plot(np.clip(fluximg[int(nf / 3.)] / tpfac, 0, nf),
                          '--',
                          linewidth=1,
                          color='C0')
            ax[j, i].plot(np.clip(fluximg[int(2 * nf / 3.)] / tpfac, 0, nf),
                          '--',
                          linewidth=1,
                          color='C1')
    f.suptitle('Cross-Power Calibration Quality for ' + t.iso[:10])
    date = t.iso[:10].replace('-', '')
    if savfig:
        try:
            plt.savefig('/common/webplots/flaremon/daily/' + date[:4] +
                        '/QUAL_' + date + 'XP.png')
        except:
            plt.savefig('/tmp/' + date[:4] + '/QUAL_' + date + 'XP.png')
            print 'The .png file could not be created in the /common/webplots/flaremon/daily/ folder.'
            print 'A copy was created in /tmp/.'
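The reference panels above are built by repeating the expected RSTN-derived spectrum at every time step, so each antenna panel can be judged against a constant-in-time image. A minimal numpy sketch of that construction with a made-up spectrum:

import numpy as np

nf, nt = 5, 8
s = np.linspace(50., 250., nf)            # stand-in for the RSTN-derived flux spectrum
fluximg = s.repeat(nt).reshape(nf, nt)    # the same spectrum in every time column
print(fluximg.shape)                      # (5, 8)
print(np.all(fluximg[:, 0] == s))         # True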
Example no. 39
0
def unrot(data, azeldict=None):
    ''' Apply the correction to differential feed rotation to data, and return
        the corrected data.  This also applies flags to data whose antennas are
        not tracking.

        Inputs:
          data     A dictionary returned by udb_util.py's readXdata().
          azeldict The dictionary returned from get_sql_info(), or if None, the appropriate
                     get_sql_info() call is done internally.

        Output:
          cdata    A dictionary with the phase-corrected data.  Only the key
                     x is updated.
    '''
    import copy
    from util import lobe, bl2ord
    trange = Time(data['time'][[0, -1]], format='jd')

    if azeldict is None:
        azeldict = get_sql_info(trange)
    chi = azeldict['ParallacticAngle'] * np.pi / 180.  # (nt, nant)
    # Correct parallactic angle for equatorial mounts, relative to Ant14
    chi[:, [8, 9, 10, 12, 13]] = 0  # Currently 0, but can be measured and updated

    # Which antennas are tracking
    track = azeldict['TrackFlag']  # True if tracking

    # Ensure that nearest valid parallactic angle is used for times in the data
    good = np.where(azeldict['ActualAzimuth'] != 0)
    tidx = []  # List of arrays of indexes for each antenna
    for i in range(14):
        gd = good[0][np.where(good[1] == i)]
        tidx.append(nearest_val_idx(data['time'], azeldict['Time'][gd].jd))

    # Read X-Y Delay phase from SQL database and get common frequencies
    xml, buf = ch.read_cal(11, t=trange[0])
    fghz = stateframe.extract(buf, xml['FGHz'])
    good, = np.where(fghz != 0.)
    fghz = fghz[good]
    dph = stateframe.extract(buf, xml['XYphase'])
    dph = dph[:, good]
    xi_rot = stateframe.extract(buf, xml['Xi_Rot'])
    xi_rot = xi_rot[good]
    fidx1, fidx2 = common_val_idx(data['fghz'], fghz, precision=4)
    missing = np.setdiff1d(np.arange(len(data['fghz'])), fidx1)

    nf, nbl, npol, nt = data['x'].shape
    nf = len(fidx1)
    # Correct data for X-Y delay phase
    for i in range(13):
        for j in range(i + 1, 14):
            k = bl2ord[i, j]
            a1 = lobe(dph[i, fidx2] - dph[j, fidx2])
            a2 = -dph[j, fidx2] - xi_rot[fidx2]
            a3 = dph[i, fidx2] - xi_rot[fidx2] + np.pi
            data['x'][fidx1, k, 1] *= np.repeat(np.exp(1j * a1),
                                                nt).reshape(nf, nt)
            data['x'][fidx1, k, 2] *= np.repeat(np.exp(1j * a2),
                                                nt).reshape(nf, nt)
            data['x'][fidx1, k, 3] *= np.repeat(np.exp(1j * a3),
                                                nt).reshape(nf, nt)

    # Correct data for differential feed rotation
    cdata = copy.deepcopy(data)
    for n in range(nt):
        for i in range(13):
            for j in range(i + 1, 14):
                k = bl2ord[i, j]
                ti = tidx[i][n]
                tj = tidx[j][n]
                if track[ti, i] and track[tj, j]:
                    dchi = chi[ti, i] - chi[tj, j]
                    cchi = np.cos(dchi)
                    schi = np.sin(dchi)
                    cdata['x'][:, k, 0, n] = data['x'][:, k, 0, n] * cchi + data['x'][:, k, 3, n] * schi
                    cdata['x'][:, k, 2, n] = data['x'][:, k, 2, n] * cchi + data['x'][:, k, 1, n] * schi
                    cdata['x'][:, k, 3, n] = data['x'][:, k, 3, n] * cchi - data['x'][:, k, 0, n] * schi
                    cdata['x'][:, k, 1, n] = data['x'][:, k, 1, n] * cchi - data['x'][:, k, 2, n] * schi
                else:
                    cdata['x'][:, k, :, n] = np.ma.masked

    # Set flags for any missing frequencies (hopefully this also works when "missing" is np.array([]))
    cdata['x'][missing] = np.ma.masked
    return cdata
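The flagging above relies on cdata['x'] being a numpy masked array: assigning np.ma.masked to a slice marks those samples as invalid without destroying the underlying numbers, and downstream statistics then skip them. A short self-contained sketch of that mechanism:

import numpy as np

x = np.ma.array(np.arange(6.).reshape(2, 3))  # masked-array wrapper around ordinary data
x[0, 1] = np.ma.masked        # flag one sample, as unrot() flags non-tracking times
x[1] = np.ma.masked           # flag a whole row, as for a missing frequency
print(x)
print(x.mask)
print(x.mean())               # 1.0 -- masked entries are ignored in the statistics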
Example no. 40
0
def gain_state(trange=None):
    ''' Read and assemble the gain state for the given timerange from 
        the SQL database, or for the last 10 minutes if trange is None.
        
        Returns the complex attenuation of the FEM for the timerange
        as an array of size (nant, npol, ntimes) [not band dependent],
        and the complex attenuation of the DCM for the same timerange
        as an array of size (nant, npol, nbands, ntimes).  Also returns
        the time as a Time() object array.
    '''
    from util import Time
    import dbutil as db
    from fem_attn_calib import fem_attn_update
    import cal_header as ch
    import stateframe as stf

    if trange is None:
        t = Time.now()
        t2 = Time(t.jd - 600. / 86400., format='jd')
        trange = Time([t2.iso, t.iso])
    ts = trange[0].lv  # Start timestamp
    te = trange[1].lv  # End timestamp
    cursor = db.get_cursor()
    # First get FEM attenuation for timerange
    D15dict = db.get_dbrecs(cursor, dimension=15, timestamp=trange)
    DCMoffdict = db.get_dbrecs(cursor, dimension=50, timestamp=trange)
    DCMoff_v_slot = DCMoffdict['DCMoffset_attn']
    #    DCMoff_0 = D15dict['DCM_Offset_Attn'][:,0]  # All ants are the same
    fem_attn = {}
    fem_attn['timestamp'] = D15dict['Timestamp'][:, 0]
    nt = len(fem_attn['timestamp'])
    junk = np.zeros([nt, 1], dtype='int')  #add the non-existing antenna 16
    fem_attn['h1'] = np.append(D15dict['Ante_Fron_FEM_HPol_Atte_First'],
                               junk,
                               axis=1)  #FEM hpol first attn value
    fem_attn['h2'] = np.append(D15dict['Ante_Fron_FEM_HPol_Atte_Second'],
                               junk,
                               axis=1)  #FEM hpol second attn value
    fem_attn['v1'] = np.append(D15dict['Ante_Fron_FEM_VPol_Atte_First'],
                               junk,
                               axis=1)  #FEM vpol first attn value
    fem_attn['v2'] = np.append(D15dict['Ante_Fron_FEM_VPol_Atte_Second'],
                               junk,
                               axis=1)  #FEM vpol second attn value
    fem_attn['ants'] = np.append(D15dict['I15'][0, :], [15])
    # Add corrections from SQL database for start time of timerange
    fem_attn_corr = fem_attn_update(fem_attn, trange[0])
    # Next get DCM attenuation for timerange
    # Getting next earlier scan header
    ver = db.find_table_version(cursor, ts, True)
    query = 'select top 50 Timestamp,FSeqList from hV' + ver + '_vD50 where Timestamp <= ' + str(
        ts) + ' order by Timestamp desc'
    fseq, msg = db.do_query(cursor, query)
    if msg == 'Success':
        fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
        bandlist = ((np.array(fseqlist) - 0.44) * 2).astype(int)
    cursor.close()
    # Read current DCM_table from database
    xml, buf = ch.read_cal(3, trange[0])
    orig_table = stf.extract(buf, xml['Attenuation']).astype('int')
    orig_table.shape = (50, 15, 2)
    xml, buf = ch.read_cal(6, trange[0])
    dcm_attn_bitv = np.nan_to_num(stf.extract(
        buf, xml['DCM_Attn_Real'])) + np.nan_to_num(
            stf.extract(buf, xml['DCM_Attn_Imag'])) * 1j
    #    # Add one more bit (all zeros) to take care of unit bit
    #    dcm_attn_bitv = np.concatenate((np.zeros((16,2,1),'int'),dcm_attn_bitv),axis=2)
    # We now have:
    #   orig_table     the original DCM at start of scan, size (nslot, nant=15, npol)
    #   DCMoff_0       the offset applied to all antennas and slots (ntimes)
    #   DCMoff_v_slot  the offset applied to all antennas but varies by slot (ntimes, nslot)
    #   dcm_attn_bitv  the measured (non-nominal) attenuations for each bit value (nant=16, npol, nbit) -- complex
    # Now I need to convert slot to band, add appropriately, and organize as (nant=16, npol, nband, ntimes)
    # Add one more antenna (all zeros) to orig_table
    orig_table = np.concatenate((orig_table, np.zeros((50, 1, 2), 'int')),
                                axis=1)
    ntimes, nslot = DCMoff_v_slot.shape
    dcm_attn = np.zeros((16, 2, 34, ntimes), np.int)
    for i in range(ntimes):
        for j in range(50):
            idx = bandlist[j] - 1
            # This adds attenuation for repeated bands--hopefully the same value for each repeat
            dcm_attn[:, :, idx, i] += orig_table[j, :, :] + DCMoff_v_slot[i, j]
    # Normalize repeated bands by finding number of repeats and dividing.
    for i in range(1, 35):
        n = len(np.where(bandlist == i)[0])
        if n > 1:
            dcm_attn[:, :, i - 1, :] /= n
    # Make sure attenuation is in range
    dcm_attn = np.clip(dcm_attn, 0, 30)
    # Finally, correct for non-nominal (measured) bit values
    # Start with 0 attenuation as reference
    dcm_attn_corr = dcm_attn * (0 + 0j)
    att = np.zeros((16, 2, 34, ntimes, 5), np.complex)
    # Calculate resulting attenuation based on bit attn values (2,4,8,16)
    for i in range(4):
        # Need dcm_attn_bitv[...,i] to be same shape as dcm_attn
        bigger_bitv = np.broadcast_to(dcm_attn_bitv[..., i],
                                      (ntimes, 34, 16, 2))
        bigger_bitv = np.swapaxes(
            np.swapaxes(np.swapaxes(bigger_bitv, 0, 3), 1, 2), 0, 1)
        att[..., i] = (np.bitwise_and(dcm_attn, 2**(i + 1)) >>
                       (i + 1)) * bigger_bitv
        dcm_attn_corr = dcm_attn_corr + att[..., i]

    # Move ntimes column to next to last position, and then sum over last column (the two attenuators)
    fem_attn_corr = np.sum(np.rollaxis(fem_attn_corr, 0, 3), 3)
    # Output is FEM shape (nant, npol, ntimes) = (16, 2, ntimes)
    #           DCM shape (nant, npol, nband, ntimes) = (16, 2, 34, ntimes)
    # Arrays are complex, in dB units
    tjd = Time(fem_attn['timestamp'].astype('int'), format='lv').jd
    return fem_attn_corr, dcm_attn_corr, tjd
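The loop near the end converts each integer DCM setting into its 2/4/8/16 dB bits and replaces each nominal bit with its measured (complex) value. A standalone sketch of that bit decomposition for a single antenna/polarization, using made-up measured bit values:

import numpy as np

setting = 22                               # requested DCM attenuation in dB (even, 0-30)
nominal = np.array([2., 4., 8., 16.])      # nominal dB value of each bit
measured = np.array([2.1 + 0j, 3.9 + 0j, 8.3 + 0.1j, 15.7 + 0j])  # made-up measured bit values

bits = np.array([(np.bitwise_and(setting, 2**(i + 1)) >> (i + 1)) for i in range(4)])
print(bits)                                # -> [1 1 0 1], since 22 = 2 + 4 + 16
print(np.sum(bits * nominal))              # 22.0, the nominal attenuation
print(np.sum(bits * measured))             # (21.7+0j), the attenuation built from measured bits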