Example #1
def find_gaincal(t=None, scan_length=6, findwhat='FEATTNTEST'):
    # This will find the project on the day of "t" or, going backwards, the
    #  nearest earlier day to "t".
    #  This routine returns a timerange that may be used as the timerange in any
    #  of the above programs.  It will return the appropriate timerange for a
    #  FEATTNTEST scan on the date for which t is given to this function, or a
    #  list of appropriate time ranges.  If the length of the scan is not 6
    #  minutes, set scan_length to the appropriate length.
    ''' Makes an SQL query to find the FEATTNTEST or GAINCALTEST scans for the
        date given in the Time() object t.  Returns a Time() range (or a list
        of Time() ranges, if more than one scan is found) suitable for use as
        the timerange in the routines above.
    '''
    loop_ = 1
    if t is None: 
        # Get today's date
        t = util.Time.now()
    timestamp = int(t.lv)
    while loop_ == 1:
        stimestamp = timestamp - (timestamp % 86400)  # Start of day
        etimestamp = stimestamp + 86399               # End of day
        # Open handle to SQL database
        cursor = dbutil.get_cursor()
        # Try to find a scan header with the requested project (only works after 2014 Nov. 30)
        verstr = dbutil.find_table_version(cursor,timestamp,True)
        if verstr is None:
            print 'No scan_header table found for given time.'
            return [], timestamp
        # First retrieve the Project from all scan headers for the day
        cursor.execute('select timestamp,Project from hV'+verstr+'_vD1 where timestamp between '+str(stimestamp)+' and '+str(etimestamp)+' order by timestamp')
        data = np.transpose(np.array(cursor.fetchall()))
        names = stateframedef.numpy.array(cursor.description)[:,0]
        cursor.close()
        if len(data) == 0:
            # No scan headers found for this day, so return empty list (and timestamp)
            return [], timestamp
        else:
            projdict = dict(zip(names,data))
            projdict['timestamp'] = projdict['timestamp'].astype('float')  # Convert timestamps from string to float
        good = np.where(projdict['Project'] == findwhat)[0]
        if len(good) != 0:
            if len(good) == 1:
                loop_ = 0
                tgc = [projdict['timestamp'][good]], timestamp
                start_= Time(tgc[0][0], format = 'lv').iso
                end_ = Time(tgc[0][0]+60*scan_length, format = 'lv').iso
                trange = Time([start_[0] , end_[0]])
                return trange
            else:
                loop_ = 0
                tgc = [projdict['timestamp'][good]], timestamp
                start_trange = Time(tgc[0][0], format = 'lv').iso
                end_trange = Time(tgc[0][0]+60*scan_length, format = 'lv').iso
                tranges = []
                for i in range(start_trange.shape[0]):
                    trange = Time([start_trange[i], end_trange[i]])
                    tranges.append(trange)
                return tranges
        else:
            timestamp = timestamp - 60*60*24
            loop_ = 1
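
A minimal usage sketch, assuming find_gaincal and the util module used throughout these examples are importable in the current session; the date is illustrative:

from util import Time

t = Time('2019-05-01 20:00:00')          # illustrative date
result = find_gaincal(t, scan_length=6, findwhat='FEATTNTEST')
if isinstance(result, list):
    # More than one scan found: a list of Time() ranges
    for trange in result:
        print 'FEATTNTEST scan:', trange.iso
elif isinstance(result, tuple):
    # The ([], timestamp) failure value: no scan_header table was found
    print 'No FEATTNTEST scans found.'
else:
    # A single Time() range
    print 'FEATTNTEST scan:', result.iso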
Example #2
def get_projects(t, nosql=False):
    ''' Read all projects from SQL for the date given in the Time() object t,
        and return a summary as a dictionary with keys Timestamp, Project, and
        EOS (another timestamp, marking the end of scan)
    '''
    if nosql:
        return get_projects_nosql(t)
    import dbutil
    # timerange is 12 UT to 12 UT on next day, relative to the day in Time() object t
    trange = Time([int(t.mjd) + 12. / 24, int(t.mjd) + 36. / 24], format='mjd')
    tstart, tend = trange.lv.astype('str')
    cursor = dbutil.get_cursor()
    mjd = t.mjd
    # Get the project IDs for scans during the period
    verstrh = dbutil.find_table_version(cursor, trange[0].lv, True)
    if verstrh is None:
        print 'No scan_header table found for given time.'
        return {}
    query = 'select Timestamp,Project from hV' + verstrh + '_vD1 where Timestamp between ' + tstart + ' and ' + tend + ' order by Timestamp'
    projdict, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        print msg
        return {}
    elif len(projdict) == 0:
        # No Project ID found, so return data and empty projdict dictionary
        print 'SQL Query was valid, but no Project data were found.'
        return {}
    projdict['Timestamp'] = projdict['Timestamp'].astype(
        'float')  # Convert timestamps from string to float
    for i in range(len(projdict['Project'])):
        projdict['Project'][i] = projdict['Project'][i].replace('\x00', '')
    projdict.update({'EOS': projdict['Timestamp'][1:]})
    projdict.update({'Timestamp': projdict['Timestamp'][:-1]})
    projdict.update({'Project': projdict['Project'][:-1]})
    cursor.close()
    return projdict
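
A short sketch of consuming the returned dictionary, under the same assumptions as above; the date is again illustrative:

from util import Time

t = Time('2019-05-01')                   # illustrative date
projdict = get_projects(t)
if projdict:
    for ts, eos, proj in zip(projdict['Timestamp'], projdict['EOS'], projdict['Project']):
        print Time(ts, format='lv').iso, 'to', Time(eos, format='lv').iso, ':', proj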
Example #3
def get_sql_info(trange):
    ''' Get all antenna information from the SQL database for a given
        timerange, including TrackFlag and Parallactic Angle
        
        Also determines the RFSwitch state (i.e. which 27-m receiver
        is being used).
    '''
    cursor = db.get_cursor()
    sqldict = db.get_dbrecs(cursor, dimension=15, timestamp=trange)
    azeldict = stateframe.azel_from_sqldict(sqldict)
    time = Time(sqldict['Timestamp'][:, 0].astype(int), format='lv')
    azeldict.update({'Time': time})
    sqldict = db.get_dbrecs(cursor, dimension=1, timestamp=trange)
    azeldict.update({'RFSwitch': sqldict['FEMA_Powe_RFSwitchStatus']})
    azeldict.update({'LF_Rcvr': sqldict['FEMA_Rece_LoFreqEnabled']})
    if np.median(azeldict['RFSwitch']) == 0.0 and np.median(
            azeldict['LF_Rcvr']) == 1.0:
        azeldict.update({'Receiver': 'Low'})
    elif np.median(azeldict['RFSwitch']) == 1.0 and np.median(
            azeldict['LF_Rcvr']) == 0.0:
        azeldict.update({'Receiver': 'High'})
    else:
        azeldict.update({'Receiver': 'Unknown'})
    cursor.close()
    return azeldict
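
A hedged usage sketch; the two-element Time() range is illustrative:

from util import Time

trange = Time(['2019-05-01 18:00:00', '2019-05-01 19:00:00'])
azeldict = get_sql_info(trange)
print 'Receiver in use:', azeldict['Receiver']
print 'Number of time samples:', len(azeldict['Time'])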
Example #4
def read_cal(type,t=None):
    ''' Read the calibration data of the given type, for the given time (as a Time() object),
        or for the current time if None.
        
        Returns a dictionary of look-up information and a binary buffer containing the 
        calibration record.
    '''
    import dbutil, read_xml2, sys
    if t is None:
        t = util.Time.now()
    timestamp = int(t.lv)  # Given (or current) time as LabVIEW timestamp
    xmldict, ver = read_cal_xml(type, t)
    cursor = dbutil.get_cursor()

    if xmldict != {}:
        query = 'set textsize 2147483647 select top 1 * from abin where Version = '+str(type+ver/10.)+' and Timestamp <= '+str(timestamp)+' order by Timestamp desc'
        sqldict, msg = dbutil.do_query(cursor,query)
        cursor.close()
        if msg == 'Success':
            if sqldict == {}:
                print 'Error: Query returned no records.'
                print query
                return {}, None
            buf = sqldict['Bin'][0]   # Binary representation of data
            return xmldict, str(buf)
        else:
            print 'Unknown error occurred reading calibration type',type
            print sys.exc_info()[1]
            return {}, None
    else:
        return {}, None
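
A reading sketch; type 2 is used here because get_gain_state further down reads the same type for its DCM attenuation table, and the date is illustrative:

from util import Time

xml, buf = read_cal(2, Time('2019-05-01'))   # illustrative date
if xml != {}:
    print 'Got calibration record of', len(buf), 'bytes, valid at or before the given time'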
Example #5
def get_sql_info(trange):
    ''' Get all antenna information from the SQL database for a given
        timerange, including TrackFlag and Parallactic Angle
    '''
    cursor = db.get_cursor()
    sqldict = db.get_dbrecs(cursor, dimension=15, timestamp=trange)
    azeldict = stateframe.azel_from_sqldict(sqldict)
    time = Time(sqldict['Timestamp'][:, 0].astype(int), format='lv')
    azeldict.update({'Time': time})
    cursor.close()
    return azeldict
Example #6
def write_cal(type,buf,t=None):
    ''' Write the calibration data provided in data buffer buf of the given type, 
        for the given time (as a Time() object), or for the current time if None.
        Typically, the time should refer to when the calibration data were taken,
        so the correct time object should be provided.
        
        The type keyword is a real number whose integer part indicates the type
        definition.  The fractional part must not be 0 (since this would indicate
        a type definition record rather than a data record).  The relevant type 
        definition is read from the database, and its total size is determined and 
        compared with the buffer size, as a sanity check.
        
        Returns True if success, or False if failure.
    '''
    import dbutil, read_xml2, sys
    if t is None:
        t = util.Time.now()
    timestamp = int(t.lv)  # Given (or current) time as LabVIEW timestamp
    typdict = cal_types()
    try:
        typinfo = typdict[int(type)]
    except:
        print 'Type',int(type),'not found in type definition dictionary.'
        return False
    cursor = dbutil.get_cursor()
    # Read type definition XML from abin table and do a sanity check
    query = 'select top 1 * from abin where Version = '+str(int(type))+'.0 and Timestamp <='+str(timestamp)+' order by Timestamp desc'
    outdict, msg = dbutil.do_query(cursor,query)
    if msg == 'Success':
        if len(outdict) == 0:
            # This type of xml file does not yet exist in the database, so indicate an error
            print 'Error: Type',type,'not defined in abin table.'
            cursor.close()
            return False
        else:
            # There is one, so read it and do a sanity check against binary data
            f = open('/tmp/tmp.xml','wb')
            f.write(outdict['Bin'][0])
            f.close()
            keys, mydict, fmt, ver = read_xml2.xml_read('/tmp/tmp.xml')
            binsize = get_size(fmt)
            if len(buf) == binsize:
                cursor.execute('insert into aBin (Timestamp,Version,Description,Bin) values (?,?,?,?)',
                   timestamp, type+ver/10., typinfo[0], dbutil.stateframedef.pyodbc.Binary(buf))
                cursor.commit()
                cursor.close()
                return True
            else:
                print 'Error: Size of buffer',len(buf),'does not match this calibration type.  Expecting',binsize
                cursor.close()
                return False
    else:
        # The query itself failed, so report the error and indicate failure
        print 'Error reading type definition from abin table:',msg
        cursor.close()
        return False
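
A hedged round-trip sketch (read the latest type-2 record and write it back under a new timestamp); note this performs a real database insert, so it is illustrative only:

from util import Time

tcal = Time('2019-05-01 19:30:00')       # illustrative time the data were taken
xml, buf = read_cal(2, tcal)
if xml != {}:
    ok = write_cal(2, buf, t=tcal)
    print 'write_cal succeeded:', ok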
Example #7
def flare_monitor(t):
    ''' Get all front-end power-detector voltages for the given day
        from the stateframe SQL database, and obtain the median of them, 
        to use as a flare monitor.
        
        Returns ut times in plot_date format and median voltages.
    '''
    import dbutil
    # timerange is 12 UT to 12 UT on next day, relative to the day in Time() object t
    trange = Time([int(t.mjd) + 12. / 24, int(t.mjd) + 36. / 24], format='mjd')
    tstart, tend = trange.lv.astype('str')
    cursor = dbutil.get_cursor()
    mjd = t.mjd
    verstr = dbutil.find_table_version(cursor, tstart)
    if verstr is None:
        print 'No stateframe table found for given time.'
        return tstart, [], {}
    query = 'select Timestamp,Ante_Fron_FEM_HPol_Voltage,Ante_Fron_FEM_VPol_Voltage from fV' + verstr + '_vD15 where timestamp between ' + tstart + ' and ' + tend + ' order by timestamp'
    data, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        print msg
        return tstart, [], {}
    for k, v in data.items():
        data[k].shape = (len(data[k]) / 15, 15)
    hv = []
    try:
        ut = Time(data['Timestamp'][:, 0].astype('float'),
                  format='lv').plot_date
    except:
        print 'Error for time', t.iso
        print 'Query:', query, ' returned msg:', msg
        print 'Keys:', data.keys()
        print data['Timestamp'][0, 0]
        # Cannot continue without valid times, so return empty results
        return tstart, [], {}
    hfac = np.median(data['Ante_Fron_FEM_HPol_Voltage'].astype('float'), 0)
    vfac = np.median(data['Ante_Fron_FEM_VPol_Voltage'].astype('float'), 0)
    for i in range(4):
        if hfac[i] > 0:
            hv.append(data['Ante_Fron_FEM_HPol_Voltage'][:, i] / hfac[i])
        if vfac[i] > 0:
            hv.append(data['Ante_Fron_FEM_VPol_Voltage'][:, i] / vfac[i])
    #import pdb; pdb.set_trace()
    flm = np.median(np.array(hv), 0)
    good = np.where(abs(flm[1:] - flm[:-1]) < 0.1)[0]

    projdict = get_projects(t)
    return ut[good], flm[good], projdict
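
A plotting sketch for the returned plot_date times and median voltages; the date is illustrative:

import matplotlib.pylab as plt
from util import Time

ut, flm, projdict = flare_monitor(Time('2019-05-01'))   # illustrative date
if len(flm) > 0:
    plt.plot_date(ut, flm, '-')
    plt.xlabel('Time [UT]')
    plt.ylabel('Median FEM voltage')
    plt.title('EOVSA flare monitor')
    plt.show()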
Example #8
def drop_deftable(version):
    ''' Drops ALL traces of a given version of a stateframe definition.
        Use with CAUTION!!!
        
        Requests confirmation from the keyboard.
    '''
    import dbutil as db
    cursor = db.get_cursor()
    # Get all table and view names from this version
    query = 'select * from information_schema.tables where table_name like "fV'+str(int(version))+'%"'
    result, msg = db.do_query(cursor, query)
    if msg == 'Success':
        names = result['TABLE_NAME']
        print 'You are about to permanently delete the following tables:'
        for name in names:
            print '    ',name
        ans = raw_input('Are you sure? [y/n]: ').upper()
        if ans != 'Y':
            print 'Action canceled by user'
            return False
        # Delete the version from the stateframedef table
        query = 'delete from StateFrameDef where Version='+str(int(version))
        r, msg = db.do_query(cursor, query)
        if msg != 'Success':
            print 'Error, could not delete from stateframedef for version:',version
            print msg
            return False
        # Loop over table and view names, dropping each in turn
        for name in names:
            query = 'drop table '+name
            r, msg = db.do_query(cursor, query)
            if msg != 'Success':
                print 'Error dropping table',name
                print msg
        # Drop Bin Reader procedure
        query = 'drop proc ov_fBinReader_V'+str(int(version))
        r, msg = db.do_query(cursor, query)
        if msg != 'Success':
            print 'Error, could not delete Bin Reader procedure for version:',version
            print msg
            return False
    else:
        print 'Error querying the information_schema for version:',version
        print msg
        return False
    print 'Successfully dropped all existing tables and table definition for version', version
    return True
Example #9
def get_median_wind(wthr):
    '''  Temporary work-around for mis-behaving weather station.
         Given the weather dictionary, query the SQL database for
         the last 120-s of wind data, and calculate median rather
         than average.  I hope this does not take too long!  Returns
         the same dictionary, with median replacing average wind.
    '''
    import dbutil as db
    cursor = db.get_cursor()
    query = 'select top 120 Timestamp,Sche_Data_Weat_Wind from fV66_vD1 order by Timestamp desc'
    data, msg = db.do_query(cursor,query)
    if msg == 'Success':
        try:
            medwind = np.median(data['Sche_Data_Weat_Wind'])
            wthr.update({'mt2MinRollAvgWindSpeed': medwind})
        except:
            pass
    cursor.close()
    return wthr
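
A small sketch; since the routine only updates one key, an initially empty weather dictionary suffices for demonstration:

wthr = get_median_wind({})
print 'Median wind over last 120 s:', wthr.get('mt2MinRollAvgWindSpeed')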
Example #10
def get_fseqfile(t=None):
    if t is None:
        # 10 s ago...
        tlv = Time.now().lv - 10
    else: 
        tlv = int(t.lv)
    cursor = db.get_cursor()
    ver = db.find_table_version(cursor,tlv)
    if ver is None:
        print 'Error: No stateframe table found for the given time.'
        cursor.close()
        return None
    # Get the name of the frequency sequence file in effect at that time
    query = 'select Timestamp,LODM_LO1A_FSeqFile from fV'+ver+'_vD1 where Timestamp between '+str(tlv)+' and '+str(tlv+1)+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        fseqfile = data['LODM_LO1A_FSeqFile'][0].replace('\x00','')
        if fseqfile == 'none':
            fseqfile = None
    else:
        print 'Error: ',msg
        fseqfile = None
    cursor.close()
    return fseqfile
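
Usage is a single call; with no argument the routine looks 10 s back from the current time. A sketch with an illustrative past time:

from util import Time

print 'Current fseq file:', get_fseqfile()
print 'fseq file on an illustrative date:', get_fseqfile(Time('2019-05-01 20:00:00'))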
Example #11
def find_solpnt(t=None):
    ''' Makes an SQL query to find the SOLPNTCAL scans for the date given in
        the Time() object t.  A list of timestamps is returned, along with 
        the timestamp of the object provided.
    '''
    import dbutil
    if t is None:
        # Get today's date
        t = util.Time.now()
    timestamp = int(t.lv)
    stimestamp = timestamp - (timestamp % 86400)  # Start of day
    etimestamp = stimestamp + 86399  # End of day
    # Open handle to SQL database
    cursor = dbutil.get_cursor()
    # Try to find a scan header with project SOLPNTCAL (only works after 2014 Nov. 30)
    verstr = dbutil.find_table_version(cursor, timestamp, True)
    if verstr is None:
        print 'No scan_header table found for given time.'
        return [], timestamp
    # First retrieve the Project from all scan headers for the day
    cursor.execute('select timestamp,Project from hV' + verstr +
                   '_vD1 where timestamp between ' + str(stimestamp) +
                   ' and ' + str(etimestamp) + ' order by timestamp')
    data = np.transpose(np.array(cursor.fetchall()))
    names = stateframedef.numpy.array(cursor.description)[:, 0]
    cursor.close()
    if len(data) == 0:
        # No SOLPNTCAL found, so return empty list (and timestamp)
        return [], timestamp
    else:
        projdict = dict(zip(names, data))
        projdict['timestamp'] = projdict['timestamp'].astype(
            'float')  # Convert timestamps from string to float
    good = np.where(projdict['Project'] == 'SOLPNTCAL')[0]
    if len(good) != 0:
        if len(good) == 1:
            return [projdict['timestamp'][good]], timestamp
        else:
            return projdict['timestamp'][good], timestamp
    else:
        return [], timestamp
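
A sketch listing a day's SOLPNTCAL scans; as get_solpnt notes further down, an entry can be a length-1 array rather than a scalar:

import numpy as np
from util import Time

tstamps, timestamp = find_solpnt(Time('2019-05-01'))    # illustrative date
for ts in tstamps:
    if type(ts) is np.ndarray:
        ts = ts[0]                       # unwrap the single-scan case
    print 'SOLPNTCAL scan at', Time(ts, format='lv').iso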
Example #12
def findscans(trange):
    '''Identify phasecal scans from UFDB files
    '''
    import dbutil
    import dump_tsys
    tstart, tend = trange.lv.astype(int).astype(str)
    cursor = dbutil.get_cursor()
    verstr = dbutil.find_table_version(cursor, tstart, True)
    if verstr is None:
        return {'msg':'No scan_header table found for given time'}
    query = 'select Timestamp,Project,SourceID from hV'+verstr+'_vD1 where left(Project,8) = "PHASECAL" and Timestamp between '+tstart+' and '+tend+' order by Timestamp'
    projdict, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        return {'msg':msg}
    if projdict == {}:
        return {'msg':'No PHASECAL scans for this day'}
    tsint = projdict['Timestamp'].astype(int)
    # Check UFDB file to get duration
    ufdb = dump_tsys.rd_ufdb(Time(int(tstart),format='lv'))
    mjd0 = int(Time(int(tstart),format='lv').mjd)
    mjdnow = int(Time.now().mjd)
    if mjd0 < mjdnow:
        # The date is a previous day, so read a second ufdb file 
        # to ensure we have the whole local day
        try:
            ufdb2 = dump_tsys.rd_ufdb(Time(int(tstart)+86400.,format='lv'))
            for key in ufdb.keys():
                ufdb.update({key: np.append(ufdb[key], ufdb2[key])})
        except:
            # No previous day, so just skip it.
            pass
    ufdb_times = ufdb['ST_TS'].astype(float).astype(int)
    idx = nearest_val_idx(tsint,ufdb_times)
    fpath = '/data1/eovsa/fits/UDB/' + trange[0].iso[:4] + '/'
    dur = []
    file = []
    for i in idx:
        dur.append(((ufdb['EN_TS'].astype(float) - ufdb['ST_TS'].astype(float))[i])/60.)
        file.append(fpath+ufdb['FILE'][i])
    # Fix source ID to remove nulls
    srclist = np.array([str(i.replace('\x00','')) for i in projdict['SourceID']])
    return {'Timestamp': tsint, 'SourceID': srclist, 'duration': np.array(dur), 'filelist':np.array(file), 'msg': msg}
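
A sketch of querying a day for PHASECAL scans; the returned dictionary always carries a 'msg' key:

from util import Time

trange = Time(['2019-05-01', '2019-05-02'])   # illustrative day
scans = findscans(trange)
if scans['msg'] == 'Success':
    for ts, src, dur in zip(scans['Timestamp'], scans['SourceID'], scans['duration']):
        print Time(ts, format='lv').iso, src, '%5.1f min' % dur
else:
    print scans['msg']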
Example #13
def read_cal_xml(type,t=None):
    ''' Read the calibration type definition xml record of the given type, for the 
        given time (as a Time() object), or for the current time if None.
        
        Returns a dictionary of look-up information and its internal version.  A side-effect
        is that a file /tmp/type<n>.xml is created, where <n> is the type.
    '''
    import dbutil, read_xml2, sys
    if t is None:
        t = util.Time.now()
    timestamp = int(t.lv)  # Given (or current) time as LabVIEW timestamp
    typdict = cal_types()
    try:
        typinfo = typdict[type]
    except:
        print 'Type',type,'not found in type definition dictionary.'
        return {}, None
    cursor = dbutil.get_cursor()
    # Read type definition XML from abin table
    query = 'select top 1 * from abin where Version = '+str(type)+'.0 and Timestamp <='+str(timestamp)+' order by Timestamp desc'
    sqldict, msg = dbutil.do_query(cursor,query)
    if msg == 'Success':
        if len(sqldict) == 0:
            # This type of xml file does not yet exist in the database, so mark it for adding
            print 'Type',type,'not defined in abin table.'
            cursor.close()
            return {}, None
        else:
            # There is one, so read it and the corresponding binary data
            buf = sqldict['Bin'][0]   # Binary representation of xml file
            xmlfile = '/tmp/type'+str(type)+'.xml'
            f = open(xmlfile,'wb')
            f.write(buf)
            f.close()
            xmldict, thisver = read_xml2.xml_ptrs(xmlfile)
            cursor.close()
            return xmldict, thisver
    else:
        # The query itself failed, so report the error
        print 'Error reading type definition:',msg
        cursor.close()
        return {}, None
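
An inspection sketch; the type number and date are illustrative, and the side-effect file (/tmp/type2.xml here) can also be examined directly:

from util import Time

xmldict, ver = read_cal_xml(2, Time('2019-05-01'))   # illustrative type and date
if xmldict != {}:
    print 'Type 2 definition, internal version', ver
    print 'Fields:', sorted(xmldict.keys())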
Example #14
def get_solpnt(t=None):
    ''' Get the SOLPNT data from the SQL database, occurring after 
        time given in the Time() object t.  If omitted, the first 
        SOLPNT scan for the current day is used (if it exists). '''
    import dbutil
    tstamps, timestamp = find_solpnt(t)
    # Find first SOLPNTCAL occurring after timestamp (time given by Time() object)
    if tstamps != []:
        print 'SOLPNTCAL scans were found at ',        
        for tstamp in tstamps:
            if type(tstamp) is np.ndarray:
                # Annoyingly necessary when only one time in tstamps
                tstamp = tstamp[0]
            t1 = util.Time(tstamp,format='lv')
            print t1.iso,';',
        print ' '
        good = np.where(tstamps >= timestamp)[0]
        # This is the timestamp of the first SOLPNTCAL scan after given time
        if good.shape[0] == 0: 
            stimestamp = tstamps[0]
        else:
            stimestamp = tstamps[good][0]
    else:
        print 'Warning: No SOLPNTCAL scan found, so interpreting given time as SOLPNTCAL time.' 
        stimestamp = timestamp

    # Grab 300 records after the start time
    cursor = dbutil.get_cursor()
    # Now version independent!
    verstr = dbutil.find_table_version(cursor,timestamp)
    if verstr is None:
        print 'No stateframe table found for the given time.'
        return {}
    solpntdict = dbutil.get_dbrecs(cursor,version=int(verstr),dimension=15,timestamp=stimestamp,nrecs=300)
    # Need dimension-1 data to get antennas in subarray -- Note: sometimes the antenna list
    # is zero (an unlikely value!) around the time of the start of a scan, so keep searching
    # first 100 records until non-zero:
    for i in range(100):
        blah = dbutil.get_dbrecs(cursor,version=int(verstr),dimension=1,timestamp=stimestamp+i,nrecs=1)
        if blah['LODM_Subarray1'][0] != 0:
            break
    cursor.close()
    sub1 = blah['LODM_Subarray1'][0]
    subarray1 = []
    antlist = []
    for i in range(16): 
        subarray1.append(sub1 & (1<<i) > 0)
        if subarray1[-1]:
            antlist.append(i)

#    print 'Antlist:',antlist
#    rao = np.zeros([15,300],dtype='int')
#    deco = np.zeros([15,300],dtype='int')
#    trk = np.zeros([15,300],dtype='bool')
#    hpol = np.zeros([15,300],dtype='float')
#    vpol = np.zeros([15,300],dtype='float')
#    ra = np.zeros(15,dtype='float')
#    dec = np.zeros(15,dtype='float')
    ra = (solpntdict['Ante_Cont_RAVirtualAxis'][:,antlist]*np.pi/10000./180.).astype('float')
    dec = (solpntdict['Ante_Cont_DecVirtualAxis'][:,antlist]*np.pi/10000./180.).astype('float')
    hpol = (solpntdict['Ante_Fron_FEM_HPol_Voltage'][:,antlist]).astype('float')
    vpol = (solpntdict['Ante_Fron_FEM_VPol_Voltage'][:,antlist]).astype('float')
    rao = (solpntdict['Ante_Cont_RAOffset'][:,antlist]).astype('float')
    deco = (solpntdict['Ante_Cont_DecOffset'][:,antlist]).astype('float')
    times = solpntdict['Timestamp'][:,0].astype('int64').astype('float')
    # Convert pointing info to track information
    outdict = stateframe.azel_from_sqldict(solpntdict)
    trk = np.logical_and(outdict['dAzimuth'][:,antlist]<0.0020,outdict['dElevation'][:,antlist]<0.0020)
    
    return {'Timestamp':stimestamp,'tstamps':times,'antlist':antlist,'trjfile':'SOLPNT.TRJ','ra':ra,'dec':dec,
             'rao':rao,'deco':deco,'trk':trk,'hpol':hpol,'vpol':vpol}
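
A sketch that pulls a SOLPNT dataset and reports which antennas were in the subarray; the time is illustrative:

from util import Time

soldict = get_solpnt(Time('2019-05-01 18:00:00'))    # illustrative time
if soldict != {}:
    print 'Trajectory file:', soldict['trjfile']
    print 'Antennas in subarray:', soldict['antlist']
    print 'Records:', len(soldict['tstamps'])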
Example #15
def get_attncal(trange, do_plot=False, dataonly=False):
    ''' Finds GAINCALTEST scans from FDB files corresponding to the days
        present in trange Time() object (can be multiple days), calculates
        the attenuation differences for the various FEMATTN states 1-8 
        relative to FEMATTN state 0, and optionally plots the results for
        states 1 and 2 (the most commonly used).  To analyze only a single
        day, trange Time() object can have the same time repeated, or can
        be a single time.
        
        Returns a list of dictionaries, each pertaining to one of the days
        in trange, with keys defined as follows:
           'time':      The start time of the GAINCALTEST scan, as a Time() object
           'fghz':      The list of frequencies [GHz] at which attenuations are measured
           'attn':      The array of attenuations [dB] of size (nattn, nant, npol, nf), 
                           where nattn = 8, nant = 13, npol = 2, and nf is variable
           'rcvr':      The array of receiver noise level (raw units) of size 
                           (nant, npol, nf), where nant = 13, npol = 2, and nf is variable
           'rcvr_auto': Same as rcvr, but for auto-correlation (hence it is complex)
                           
        N.B.: Ignores days with other than one GAINCALTEST measurement, e.g. 0 or 2,
              the first is obvious, while the second is because there is no way to
              tell which of the 2 are good.
        
        The dataonly parameter tells the routine to skip calculating the attenuation
        and only return the IDB data from the (first) gaincal.
    '''
    from util import get_idbdir, fname2mjd, nearest_val_idx
    import socket
    import dbutil
    if type(trange.mjd) == np.float:
        # Interpret single time as both start and end time
        mjd1 = int(trange.mjd)
        mjd2 = mjd1
    else:
        mjd1, mjd2 = trange.mjd.astype(int)
    if do_plot:
        import matplotlib.pylab as plt
        f, ax = plt.subplots(4, 13)
        f.set_size_inches((14, 5))
        ax[0, 0].set_ylabel('Atn1X [dB]')
        ax[1, 0].set_ylabel('Atn1Y [dB]')
        ax[2, 0].set_ylabel('Atn2X [dB]')
        ax[3, 0].set_ylabel('Atn2Y [dB]')
        for i in range(13):
            ax[0, i].set_title('Ant ' + str(i + 1))
            ax[3, i].set_xlabel('Freq [GHz]')
            for j in range(2):
                ax[j, i].set_ylim(1, 3)
                ax[j + 2, i].set_ylim(3, 5)
    outdict = []
    for mjd in range(mjd1, mjd2 + 1):
        fdb = dt.rd_fdb(Time(mjd, format='mjd'))
        gcidx, = np.where(fdb['PROJECTID'] == 'GAINCALTEST')
        if len(gcidx) == 1:
            print fdb['FILE'][gcidx]
            gcidx = gcidx[0]
        else:
            for i, fname in enumerate(fdb['FILE'][gcidx]):
                print str(i) + ': GAINCALTEST File', fname
            idex = input('There is more than one GAINCALTEST. Select: ' +
                         str(np.arange(len(gcidx))) + ':')
            gcidx = gcidx[idex]

        datadir = get_idbdir(Time(mjd, format='mjd'))
        # Add date path if on pipeline
        # if datadir.find('eovsa') != -1: datadir += fdb['FILE'][gcidx][3:11]+'/'

        host = socket.gethostname()
        if host == 'pipeline': datadir += fdb['FILE'][gcidx][3:11] + '/'

        file = datadir + fdb['FILE'][gcidx]
        out = ri.read_idb([file])
        if dataonly:
            return out
        # Get time from filename and read 120 records of attn state from SQL database
        filemjd = fname2mjd(fdb['FILE'][gcidx])
        cursor = dbutil.get_cursor()
        d15 = dbutil.get_dbrecs(cursor,
                                dimension=15,
                                timestamp=Time(filemjd, format='mjd'),
                                nrecs=120)
        cursor.close()
        # Find time indexes of the 62 dB attn state
        # Uses only ant 1 assuming all are the same
        dtot = (d15['Ante_Fron_FEM_HPol_Atte_Second'] +
                d15['Ante_Fron_FEM_HPol_Atte_First'])[:, 0]
        # Use system clock day number to identify bad SQL entries and eliminate them
        good, = np.where(d15['Ante_Cont_SystemClockMJDay'][:, 0] != 0)
        #import pdb; pdb.set_trace()
        # Indexes into SQL records where a transition occurred.
        transitions, = np.where(dtot[good] - np.roll(dtot[good], 1) != 0)
        # Eliminate any zero-index transition (if it exists)
        if transitions[0] == 0:
            transitions = transitions[1:]
        # These now have to be translated into indexes into the data, using the times
        idx = nearest_val_idx(d15['Timestamp'][good, 0][transitions],
                              Time(out['time'], format='jd').lv)
        #import pdb; pdb.set_trace()
        vx = np.nanmedian(
            out['p'][:13, :, :, np.arange(idx[0] + 1, idx[1] - 1)], 3)
        va = np.mean(out['a'][:13, :2, :,
                              np.arange(idx[0] + 1, idx[1] - 1)], 3)
        vals = []
        attn = []
        for i in range(1, 10):
            vals.append(
                np.nanmedian(
                    out['p'][:13, :, :,
                             np.arange(idx[i] + 1, idx[i + 1] - 1)], 3) - vx)
            attn.append(np.log10(vals[0] / vals[-1]) * 10.)
        #vals = []
        #attna = []
        #for i in range(1,10):
        #    vals.append(np.median(out['a'][:13,:2,:,np.arange(idx[i],idx[i+1])],3) - va)
        #    attna.append(np.log10(vals[0]/vals[-1])*10.)

        if do_plot:
            for i in range(13):
                for j in range(2):
                    ax[j, i].plot(out['fghz'],
                                  attn[1][i, j],
                                  '.',
                                  markersize=3)
                    #ax[j,i].plot(out['fghz'],attna[1][i,j],'.',markersize=1)
                    ax[j + 2, i].plot(out['fghz'],
                                      attn[2][i, j],
                                      '.',
                                      markersize=3)
                    #ax[j+2,i].plot(out['fghz'],attna[2][i,j],'.',markersize=1)
        outdict.append({
            'time': Time(out['time'][0], format='jd'),
            'fghz': out['fghz'],
            'rcvr_auto': va,  # 'attna': np.array(attna[1:]), 
            'rcvr': vx,
            'attn': np.array(attn[1:])
        })
    return outdict
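
A sketch over an illustrative two-day range; each returned dictionary carries the attn array with the shape the docstring describes:

from util import Time

trange = Time(['2019-05-01', '2019-05-02'])   # illustrative range
for a in get_attncal(trange, do_plot=False):
    print a['time'].iso, 'attn shape (nattn, nant, npol, nf):', a['attn'].shape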
Example #16
def send_xml2sql():
    ''' Routine to send any changed calibration xml definitions to the 
        SQL Server.  The latest definition (if any) for a given type is
        checked to see if the version matches.  If not, an update is 
        stored.  This routine will typically be run whenever a definition
        is added or changed.
    '''
    import dbutil, read_xml2, sys
    t = util.Time.now()
    timestamp = t.lv  # Current time as LabVIEW timestamp
    cursor = dbutil.get_cursor()
    typdict = cal_types()
    for key in typdict.keys():
        #print 'Working on',typdict[key][0]
        # Execute the code to create the xml description for this key
        exec 'buf = '+typdict[key][1]+'()'
        # Resulting buf must be written to a temporary file and reread
        # by xml_ptrs()
        f = open('/tmp/tmp.xml','wb')
        f.write(buf)
        f.close()
        mydict, xmlver = read_xml2.xml_ptrs('/tmp/tmp.xml')
        defn_version = float(key)+xmlver/10.  # Version number expected
        # Retrieve most recent key.0 record and check its version against the expected one
        query = 'select top 1 * from abin where Version = '+str(key)+'.0 order by Timestamp desc'
        #print 'Executing query'
        outdict, msg = dbutil.do_query(cursor,query)
        #print msg
        if msg == 'Success':
            if len(outdict) == 0:
                # This type of xml file does not yet exist in the database, so mark it for adding
                add = True
            else:
                # There is one, so see if it agrees with the version
                buf = outdict['Bin'][0]   # Binary representation of xml file
                f = open('/tmp/tmp.xml','wb')
                f.write(buf)
                f.close()
                mydict, thisver = read_xml2.xml_ptrs('/tmp/tmp.xml')
                #print 'Versions:',float(key)+thisver/10.,defn_version
                if (float(key)+thisver/10.) == defn_version:
                    # This description is already there so we will skip it
                    add = False
                else:
                    add = True
        else:
            # Some kind of error occurred, so print the message and skip adding the xml description
            #print 'Query',query,'resulted in error message:',msg
            add = False
        if add:
            # This is either new or updated, so add the xml description
            # to the database
            #print 'Trying to add',typdict[key][0]
            try:
                cursor.execute('insert into aBin (Timestamp,Version,Description,Bin) values (?,?,?,?)',
                   timestamp, float(key), typdict[key][0], dbutil.stateframedef.pyodbc.Binary(buf))
                print typdict[key][0],'successfully added/updated to version',defn_version
            except:
                print 'Unknown error occurred in adding',typdict[key][0]
                print sys.exc_info()[1]
        else:
            print typdict[key][0],'version',defn_version,'already exists--not updated'
    cursor.commit()
    cursor.close()
Example #17
def get_gain_state(trange, dt=None, relax=False):
    ''' Get all gain-state information for a given timerange.  Returns a dictionary
        with keys as follows:
        
        times:     A Time object containing the array of times, size (nt)
        h1:        The first HPol attenuator value for 15 antennas, size (nt, 15) 
        v1:        The first VPol attenuator value for 15 antennas, size (nt, 15) 
        h2:        The second HPol attenuator value for 15 antennas, size (nt, 15) 
        v2:        The second VPol attenuator value for 15 antennas, size (nt, 15)
        dcmattn:   The base DCM attenuations for nbands x 15 antennas x 2 Poln, size (34 or 52,30)
                      The order is Ant1 H, Ant1 V, Ant2 H, Ant2 V, etc.
        dcmoff:    If DPPoffset-on is 0, this is None (meaning there are no changes to the
                      above base attenuations).  
                   If DPPoffset-on is 1, then dcmoff is a table of offsets to the 
                      base attenuation, size (nt, 50).  The offset applies to all 
                      antennas/polarizations.
                      
        Optional keywords:
           dt      Seconds between entries to read from SQL stateframe database. 
                     If omitted, 1 s is assumed.
           relax   Used when getting the gain state for a reference time, in case there
                     are no SQL data at the requested time.  In that case it finds the data
                     for the nearest later time.
    '''
    if dt is None:
        tstart, tend = [str(i) for i in trange.lv]
    else:
        # Expand time by 1/2 of dt before and after
        tstart = str(np.round(trange[0].lv - dt / 2))
        tend = str(np.round(trange[1].lv + dt / 2))
    cursor = db.get_cursor()
    ver = db.find_table_version(cursor, trange[0].lv)
    # Get front end attenuator states
    # Attempt to solve the problem if there are no data
    if relax:
        # Special case of reference gain, where we want the first nt records after tstart, in case there
        # are no data at time tstart
        nt = int(float(tend) - float(tstart) - 1) * 15
        query = 'select top '+str(nt)+' Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second,' \
            +'Ante_Fron_FEM_VPol_Atte_First,Ante_Fron_FEM_VPol_Atte_Second,Ante_Fron_FEM_Clockms from fV' \
            +ver+'_vD15 where Timestamp >= '+tstart+' order by Timestamp'
    else:
        query = 'select Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second,' \
            +'Ante_Fron_FEM_VPol_Atte_First,Ante_Fron_FEM_VPol_Atte_Second,Ante_Fron_FEM_Clockms from fV' \
            +ver+'_vD15 where Timestamp >= '+tstart+' and Timestamp < '+tend+' order by Timestamp'
    #if dt:
    #    # If dt (seconds between measurements) is set, add appropriate SQL statement to query
    #    query += ' and (cast(Timestamp as bigint) % '+str(dt)+') = 0 '
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        if dt:
            # If we want other than full cadence, get new array shapes and times
            n = len(data['Timestamp'])  # Original number of times
            new_n = (
                n / 15 / dt
            ) * 15 * dt  # Truncated number of times equally divisible by dt
            new_shape = (n / 15 / dt, dt, 15)  # New shape of truncated arrays
            times = Time(data['Timestamp'][:new_n].astype('int')[::15 * dt],
                         format='lv')
        else:
            times = Time(data['Timestamp'].astype('int')[::15], format='lv')
        # Change tstart and tend to correspond to actual times from SQL
        tstart, tend = [str(i) for i in times[[0, -1]].lv]
        h1 = data['Ante_Fron_FEM_HPol_Atte_First']
        h2 = data['Ante_Fron_FEM_HPol_Atte_Second']
        v1 = data['Ante_Fron_FEM_VPol_Atte_First']
        v2 = data['Ante_Fron_FEM_VPol_Atte_Second']
        ms = data['Ante_Fron_FEM_Clockms']
        nt = len(h1) / 15
        h1.shape = (nt, 15)
        h2.shape = (nt, 15)
        v1.shape = (nt, 15)
        v2.shape = (nt, 15)
        ms.shape = (nt, 15)
        # Find any entries for which Clockms is zero, which indicates where no
        # gain-state measurement is available.
        for i in range(15):
            bad, = np.where(ms[:, i] == 0)
            if bad.size != 0 and bad.size != nt:
                # Find nearest adjacent good value
                good, = np.where(ms[:, i] != 0)
                idx = nearest_val_idx(bad, good)
                h1[bad, i] = h1[good[idx], i]
                h2[bad, i] = h2[good[idx], i]
                v1[bad, i] = v1[good[idx], i]
                v2[bad, i] = v2[good[idx], i]
        if dt:
            # If we want other than full cadence, find mean over dt measurements
            h1 = np.mean(h1[:new_n / 15].reshape(new_shape), 1)
            h2 = np.mean(h2[:new_n / 15].reshape(new_shape), 1)
            v1 = np.mean(v1[:new_n / 15].reshape(new_shape), 1)
            v2 = np.mean(v2[:new_n / 15].reshape(new_shape), 1)
        # Put results in canonical order [nant, nt]
        h1 = h1.T
        h2 = h2.T
        v1 = v1.T
        v2 = v2.T
    else:
        print 'Error reading FEM attenuations:', msg
        return {}
    # Get back end attenuator states
    xml, buf = ch.read_cal(2, t=trange[0])
    dcmattn = stf.extract(buf, xml['Attenuation'])
    nbands = dcmattn.shape[0]
    dcmattn.shape = (nbands, 15, 2)
    # Put into canonical order [nant, npol, nband]
    dcmattn = np.moveaxis(dcmattn, 0, 2)
    # See if DPP offset is enabled
    query = 'select Timestamp,DPPoffsetattn_on from fV' \
            +ver+'_vD1 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        dppon = data['DPPoffsetattn_on']
        if np.where(dppon > 0)[0].size == 0:
            dcm_off = None
        else:
            query = 'select Timestamp,DCMoffset_attn from fV' \
                    +ver+'_vD50 where Timestamp >= '+tstart+' and Timestamp <= '+tend
            #if dt:
            #    # If dt (seconds between measurements) is set, add appropriate SQL statement to query
            #    query += ' and (cast(Timestamp as bigint) % '+str(dt)+') = 0 '
            query += ' order by Timestamp'
            data, msg = db.do_query(cursor, query)
            if msg == 'Success':
                otimes = Time(data['Timestamp'].astype('int')[::15],
                              format='lv')
                dcmoff = data['DCMoffset_attn']
                dcmoff.shape = (nt, 50)
                # We now have a time-history of offsets, at least some of which are non-zero.
                # Offsets by slot number do us no good, so we need to translate to band number.
                # Get fseqfile name at mean of timerange, from stateframe SQL database
                fseqfile = get_fseqfile(
                    Time(int(np.mean(trange.lv)), format='lv'))
                if fseqfile is None:
                    print 'Error: No active fseq file.'
                    dcm_off = None
                else:
                    # Get fseqfile from ACC and return bandlist
                    bandlist = fseqfile2bandlist(fseqfile)
                    nbands = len(bandlist)
                    # Use bandlist to covert nt x 50 array to nt x nbands array of DCM attn offsets
                    # Note that this assumes DCM offset is the same for any multiply-sampled bands
                    # in the sequence.
                    dcm_off = np.zeros((nt, nbands), float)
                    dcm_off[:, bandlist - 1] = dcmoff
                    # Put into canonical order [nband, nt]
                    dcm_off = dcm_off.T
                    if dt:
                        # If we want other than full cadence, find mean over dt measurements
                        new_nt = len(times)
                        dcm_off = dcm_off[:, :new_nt * dt]
                        dcm_off.shape = (nbands, dt, new_nt)
                        dcm_off = np.mean(dcm_off, 1)
            else:
                print 'Error reading DCM attenuations:', msg
                dcm_off = None
    else:
        print 'Error reading DPPon state:', msg
        dcm_off = None
    cursor.close()
    return {
        'times': times,
        'h1': h1,
        'v1': v1,
        'h2': h2,
        'v2': v2,
        'dcmattn': dcmattn,
        'dcmoff': dcm_off
    }
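
A sketch reading gain state at a reduced 10-s cadence; shapes follow the canonical [nant, nt] order described in the docstring, and the timerange is illustrative:

from util import Time

trange = Time(['2019-05-01 18:00:00', '2019-05-01 19:00:00'])
gs = get_gain_state(trange, dt=10)
if gs != {}:
    print 'Times:', len(gs['times']), ' h1 shape [nant, nt]:', gs['h1'].shape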
Example #18
def flare_monitor(t):
    ''' Get all front-end power-detector voltages for the given day
        from the stateframe SQL database, and obtain the median of them, 
        to use as a flare monitor.
        
        Returns ut times in plot_date format and median voltages.
    '''
    import dbutil
    # timerange is 13 UT to 02 UT on next day, relative to the day in Time() object t
    trange = Time([int(t.mjd) + 13./24,int(t.mjd) + 26./24],format='mjd')
    tstart, tend = trange.lv.astype('str')
    cursor = dbutil.get_cursor()
    mjd = t.mjd
    try:
        verstr = dbutil.find_table_version(cursor,tstart)
        if verstr is None:
            print 'No stateframe table found for given time.'
            return tstart, [], {}
        cursor.execute('select * from fV'+verstr+'_vD15 where I15 < 4 and timestamp between '+tstart+' and '+tend+' order by timestamp')
    except:
        print 'Error with query of SQL database.'
        return tstart, [], {}
    data = np.transpose(np.array(cursor.fetchall(),'object'))
    if len(data) == 0:
        # No data found, so return timestamp and empty lists
        print 'SQL Query was valid, but no data for',t.iso[:10],'were found (yet).'
        return tstart, [], {}
    ncol,ntot = data.shape
    data.shape = (ncol,ntot/4,4)
    names = np.array(cursor.description)[:,0]
    mydict = dict(zip(names,data))
    hv = []
    ut = Time(mydict['Timestamp'][:,0].astype('float'),format='lv').plot_date 
    hfac = np.median(mydict['Ante_Fron_FEM_HPol_Voltage'].astype('float'),0)
    vfac = np.median(mydict['Ante_Fron_FEM_VPol_Voltage'].astype('float'),0)
    for i in range(4):
        if hfac[i] > 0:
            hv.append(mydict['Ante_Fron_FEM_HPol_Voltage'][:,i]/hfac[i])
        if vfac[i] > 0:
            hv.append(mydict['Ante_Fron_FEM_VPol_Voltage'][:,i]/vfac[i])
    flm = np.median(np.array(hv),0)
    good = np.where(abs(flm[1:]-flm[:-1])<0.01)[0]

    # Get the project IDs for scans during the period
    verstrh = dbutil.find_table_version(cursor,trange[0].lv,True)
    if verstrh is None:
        print 'No scan_header table found for given time.'
        return ut[good], flm[good], {}
    cursor.execute('select Timestamp,Project from hV'+verstrh+'_vD1 where Timestamp between '+tstart+' and '+tend+' order by Timestamp')
    data = np.transpose(np.array(cursor.fetchall()))
    names = np.array(cursor.description)[:,0]
    if len(data) == 0:
        # No Project ID found, so return data and empty projdict dictionary
        print 'SQL Query was valid, but no Project data were found.'
        return ut[good], flm[good], {}
    projdict = dict(zip(names,data))
    projdict['Timestamp'] = projdict['Timestamp'].astype('float')  # Convert timestamps from string to float

    # Get the times when scanstate is -1
    cursor.execute('select Timestamp,Sche_Data_ScanState from fV'+verstr+'_vD1 where Timestamp between '+tstart+' and '+tend+' and Sche_Data_ScanState = -1 order by Timestamp')
    scan_off_times = np.transpose(np.array(cursor.fetchall()))[0]  #Just list of timestamps
    if len(scan_off_times) > 2:
        gaps = scan_off_times[1:] - scan_off_times[:-1] - 1
        eos = np.where(gaps > 10)[0]
        if len(eos) > 1:
            if scan_off_times[eos[1]] < projdict['Timestamp'][0]:
                # Gaps are not lined up, so drop the first:
                eos = eos[1:]
        EOS = scan_off_times[eos]
        if scan_off_times[eos[0]] <= projdict['Timestamp'][0]:
            # First EOS is earlier than first Project ID, so make first Project ID None.
            projdict['Timestamp'] = np.append([scan_off_times[0]],projdict['Timestamp'])
            projdict['Project'] = np.append(['None'],projdict['Project'])
        if scan_off_times[eos[-1]+1] >= projdict['Timestamp'][-1]:
            # Last EOS is later than last Project ID, so make last Project ID None.
            projdict['Timestamp'] = np.append(projdict['Timestamp'],[scan_off_times[eos[-1]+1]])
            projdict['Project'] = np.append(projdict['Project'],['None'])
            EOS = np.append(EOS,[scan_off_times[eos[-1]+1],scan_off_times[-1]])
        projdict.update({'EOS': EOS})
    else:
        # Not enough scan changes to determine EOS (end-of-scan) times
        projdict.update({'EOS': []})
    cursor.close()
    
    return ut[good],flm[good],projdict
Example #19
def send_xml2sql(type=None,t=None,test=False,nant=None,nfrq=None):
    ''' Routine to send any changed calibration xml definitions to the 
        SQL Server.  The latest definition (if any) for a given type is
        checked to see if the version matches.  If not, an update is 
        stored.  This routine will typically be run whenever a definition
        is added or changed.  If type is provided (i.e. not None), only 
        the given type will be updated (and only if its internal version 
        number has changed).
        
        The timestamp of the new record will be set according to the Time()
        object t, if provided, or the current time if not.
        
        As a debugging tool, if test is True, this routine goes through the
        motions but does not write to the abin table.
    '''
    import dbutil, read_xml2, sys
    if t is None:
        t = util.Time.now()
    timestamp = int(t.lv)  # Current time as LabVIEW timestamp
    cursor = dbutil.get_cursor()
    typdict = cal_types()
    if type:
        # If a particular type is specified, limit the action to that type
        typdict = {type:typdict[type]}
    for key in typdict.keys():
        #print 'Working on',typdict[key][0]
        # Execute the code to create the xml description for this key
        if key == 1:
            # Special case for TP calibration
            if nant is None or nfrq is None:
                print 'For',typdict[key][0],'values for both nant and nfrq are required.'
                cursor.close()
                return
            exec 'buf = '+typdict[key][1]+'(nant='+str(nant)+',nfrq='+str(nfrq)+')'
        else:
            exec 'buf = '+typdict[key][1]+'()'
        # Resulting buf must be written to a temporary file and reread
        # by xml_ptrs()
        f = open('/tmp/tmp.xml','wb')
        f.write(buf)
        f.close()
        mydict, xmlver = read_xml2.xml_ptrs('/tmp/tmp.xml')
        defn_version = float(key)+xmlver/10.  # Version number expected
        # Retrieve most recent key.0 record and check its version against the expected one
        query = 'select top 1 * from abin where Version = '+str(key)+'.0 and Timestamp <= '+str(timestamp)+' order by Timestamp desc'
        #print 'Executing query'
        outdict, msg = dbutil.do_query(cursor,query)
        #print msg
        if msg == 'Success':
            if len(outdict) == 0:
                # This type of xml file does not yet exist in the database, so mark it for adding
                add = True
            else:
                # There is one, so see if they differ
                buf2 = outdict['Bin'][0]   # Binary representation of xml file
                if buf == buf2:
                    # This description is already there so we will skip it
                    add = False
                else:
                    add = True
        else:
            # Some kind of error occurred, so print the message and skip adding the xml description
            #print 'Query',query,'resulted in error message:',msg
            add = False
        if add:
            # This is either new or updated, so add the xml description
            # to the database
            #print 'Trying to add',typdict[key][0]
            try:
                if test:
                    print 'Would have updated',typdict[key][0],'to version',defn_version
                else:
                    cursor.execute('insert into aBin (Timestamp,Version,Description,Bin) values (?,?,?,?)',
                                   timestamp, key, typdict[key][0], dbutil.stateframedef.pyodbc.Binary(buf))
                    print 'Type definition for',typdict[key][0],'successfully added/updated to version',defn_version,'--OK'
                    cursor.commit()
            except:
                print 'Unknown error occurred in adding',typdict[key][0]
                print sys.exc_info()[1]
        else:
            print 'Type definition for',typdict[key][0],'version',defn_version,'exists--OK'
    cursor.close()
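
The exec-based dispatch above assembles source text from the function name stored in typdict[key][1]. The same lookup can be done without exec via getattr; a sketch under the assumption that the generator functions live in the current module (make_xml_buf is a hypothetical helper, not part of the original code):

import sys

def make_xml_buf(typdict, key, **kwargs):
    # typdict[key][1] holds the generator's name as a string; resolve it to a
    # callable on this module instead of building source text for exec.
    func = getattr(sys.modules[__name__], typdict[key][1])
    return func(**kwargs)

# e.g. buf = make_xml_buf(typdict, 1, nant=nant, nfrq=nfrq)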
Example #24
def get_reverseattn(trange_gaincal, trange_other=None, first_attn_base=5, second_attn_base=3, corrected_attns=False):
    # This function finds and returns the tsys, the noise-corrected tsys, and the noise- and
    #  attenuation-corrected tsys. The first parameter should be a GAINCALTEST trange, which may
    #  be located with find_gaincal; the FEATTNTEST tranges it needs are located internally by
    #  attn_noises. Any file recorded by the system from the antennas may be passed as
    #  trange_other, whether a flare or just quiet-sun data.
    #
    #  PLEASE NOTE: ANY trange with data may be used as "trange_gaincal". Use a trange from a
    #  GAINCALTEST as "trange_gaincal" and the other file as "trange_other" if you want the noise
    #  to be calibrated from the GAINCALTEST file, which will most likely be more recent than the
    #  FEATTNTEST file it would otherwise take the noise from.
    tsys1, res1, noise_level, idx1a, idx3a, marker1, trange_feattncal, trange_feattncal2 = attn_noises(trange_gaincal)
    if corrected_attns:
        all_attns_avg = get_all_attns(trange_feattncal, test='FEATTNTEST', from_corrected_attns=True)
        all_attns_avg1 = get_all_attns(trange_feattncal2, test='FEATTNTEST2', from_corrected_attns=True)
    else:
        all_attns_avg = get_all_avg_attns(trange_feattncal, test='FEATTNTEST')
        all_attns_avg1 = get_all_avg_attns(trange_feattncal2, test='FEATTNTEST2')

    if trange_other is None:
        tsys = tsys1
        res = res1
        idx1 = idx1a
        idx3 = idx3a
        marker = marker1
    else:
        s = sp.Spectrogram(trange_other)
        s.docal = False
        s.dosub = False
        s.domedian = False
        tsys, std = s.get_data()
        cursor = dbutil.get_cursor()
        res, msg = dbutil.do_query(cursor, 'select Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second from fV54_vD15 where Timestamp between '+str(trange_other[0].lv)+' and '+str(trange_other[1].lv))
        cursor.close()
        idx1, idx2 = util.common_val_idx(res['Timestamp'][0::15].astype('int'), (s.time.lv+0.5).astype('int'))
        marker = -1
        while idx1[-1] > idx2[-1]:
            idx1 = np.delete(idx1, -1)
            marker += 1
        tsys = tsys[:, :, :, idx1]
        idx3, idx4 = util.common_val_idx(res['Timestamp'].astype('int'), (s.time.lv+0.5).astype('int'))
        res['Timestamp'] = res['Timestamp'][idx3]

    # Use whichever of noise_level or tsys has the smaller number of frequency channels
    freqs_for_range = min(noise_level.shape[2], tsys.shape[2])

    # Subtract the background noise level from every sample
    tsys_noise_corrected = []
    for ant in range(tsys.shape[0]):
        pol_corrected = []
        for pol in range(tsys.shape[1]):
            freq_corrected = []
            for freq in range(freqs_for_range):
                index_corrected = []
                for index in range(tsys.shape[3]):
                    index_corrected.append(tsys[ant, pol, freq, index] - noise_level[ant, pol, freq])
                freq_corrected.append(index_corrected)
            pol_corrected.append(freq_corrected)
        tsys_noise_corrected.append(pol_corrected)
    tsys_noise_corrected = np.array(tsys_noise_corrected)

    freqloops = min(tsys_noise_corrected.shape[2], all_attns_avg.shape[3], all_attns_avg1.shape[3])

    # Loop over no more time indices than both tsys and the attenuator records can supply
    if tsys.shape[3] < len(res['Ante_Fron_FEM_HPol_Atte_Second'][0::15]):
        indexloops = tsys.shape[3]
    else:
        indexloops = len(res['Ante_Fron_FEM_HPol_Atte_Second'][0::15]) - 1
    if tsys.shape[3] < len(res['Ante_Fron_FEM_HPol_Atte_First'][0::15]):
        indexloops1 = tsys.shape[3]
    else:
        indexloops1 = len(res['Ante_Fron_FEM_HPol_Atte_First'][0::15]) - 1
    idxstart = marker + (15-8)
    xory = ['x', 'y']
    ant_postcorrected = []
    for ant in range(tsys.shape[0]):
        pol_postcorrected = []
        for pol in range(tsys.shape[1]):
            freq_postcorrected = []
            for freq in range(freqloops):
                indices_postcorrected = []
                for indx in range(indexloops):
                    testlevel = res['Ante_Fron_FEM_HPol_Atte_Second'][ant::15][indx+idxstart]
                    if not 0 <= testlevel <= 31:
                        print 'Problem with the attenuation of antenna ' + str(ant) + xory[pol] + ' at frequency channel ' + str(freq) + ' and time index ' + str(indx) + '. The attenuation is showing: ' + str(testlevel)
                        testlevel = 0
                    indices_postcorrected.append(10**((all_attns_avg[testlevel, ant, pol, freq]-all_attns_avg[second_attn_base, ant, pol, freq])/10)*tsys_noise_corrected[ant, pol, freq, indx])
                indices_postcorrected1 = []
                for indx in range(indexloops1):
                    testlevel = res['Ante_Fron_FEM_HPol_Atte_First'][ant::15][indx+idxstart]
                    if not 0 <= testlevel <= 31:
                        print 'Problem with the attenuation of antenna ' + str(ant) + xory[pol] + ' at frequency channel ' + str(freq) + ' and time index ' + str(indx) + '. The attenuation is showing: ' + str(testlevel)
                        testlevel = 0
                    indices_postcorrected1.append(10**((all_attns_avg1[testlevel, ant, pol, freq]-all_attns_avg1[first_attn_base, ant, pol, freq])/10)*indices_postcorrected[indx])
                freq_postcorrected.append(indices_postcorrected1)
            pol_postcorrected.append(freq_postcorrected)
        ant_postcorrected.append(pol_postcorrected)
    tsys_attn_noise_corrected = np.array(ant_postcorrected)

    return tsys_attn_noise_corrected, tsys_noise_corrected, tsys
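
The innermost loops above apply tsys_corrected = 10**((attn[level] - attn[base]) / 10) * tsys sample by sample. Because the factor depends only on the recorded attenuator level, the lookup can be vectorized; a sketch with toy shapes and values (not the real calibration tables):

import numpy as np

nlev, nt = 32, 100
attn_table = np.linspace(0., 31., nlev)   # hypothetical dB attenuation per level
levels = np.random.randint(0, nlev, nt)   # recorded attenuator level vs. time
tsys = np.random.rand(nt)                 # noise-subtracted power vs. time
base = 3                                  # reference attenuation state

# One multiplicative factor per time sample, looked up from the level record
factor = 10**((attn_table[levels] - attn_table[base]) / 10.)
tsys_corrected = factor * tsys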
Example #25
def attn_noises(trange_gaincal):
    # This function is used with "get_reverseattn". It finds and returns the background noise,
    #  along with the "res" from dbutil.do_query and the "tsys" from get_data().
    #  The parameter should be a GAINCALTEST trange, which may be located with find_gaincal.
    #
    #  PLEASE NOTE: ANY trange with data may be used as "trange_gaincal". Use a trange from a
    #  GAINCALTEST if you want the noise to be calibrated from the GAINCALTEST file, which will
    #  most likely be more recent than the FEATTNTEST file it would otherwise take the noise from.
    s = sp.Spectrogram(trange_gaincal)
    s.docal = False
    s.dosub = False
    s.domedian = False
    tsys, std = s.get_data()
    trange_feattncal = find_gaincal()
    if type(trange_feattncal) == list:
        trange_feattncal = trange_feattncal[-1]
    trange_feattncal2 = find_gaincal(t=Time('2015-07-21 00:00'), scan_length=5, findwhat='FEATTNTEST2')
    if type(trange_feattncal2) == list:
        trange_feattncal2 = trange_feattncal2[-1]
    ratios, calfilenoise = show_dB_ratio(trange_feattncal)
    ratios1, calfilenoise1 = show_dB_ratio(trange_feattncal2, test='FEATTNTEST2')

    cursor = dbutil.get_cursor()
    res, msg = dbutil.do_query(cursor, 'select Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second from fV54_vD15 where Timestamp between '+str(trange_gaincal[0].lv)+' and '+str(trange_gaincal[1].lv))
    cursor.close()

    idx1, idx2 = util.common_val_idx(res['Timestamp'][0::15].astype('int'), (s.time.lv+0.5).astype('int'))
    idx3, idx4 = util.common_val_idx(res['Timestamp'].astype('int'), (s.time.lv+0.5).astype('int'))
    marker = -1
    while idx1[-1] > idx2[-1]:
        idx1 = np.delete(idx1, -1)
        marker += 1
    tsys = tsys[:, :, :, idx1]

    # Average the calibration-file noise over time, leaving shape (nant, npol, nfreq)
    calfilenoise_ = []
    for ant in range(calfilenoise.shape[0]):
        calfilenoisepol = []
        for pol in range(calfilenoise.shape[1]):
            calfilenoisefreq = []
            for freq in range(calfilenoise.shape[2]):
                calfilenoisefreq.append(np.average(calfilenoise[ant, pol, freq, :]))
            calfilenoisepol.append(calfilenoisefreq)
        calfilenoise_.append(calfilenoisepol)
    calfilenoise = np.array(calfilenoise_)

    # Estimate the background noise from times when both attenuators are at 31 dB
    noise_level = []
    for ant in range(tsys.shape[0]):
        pol_noise = []
        for pol in range(tsys.shape[1]):
            freq_noise = []
            state, = np.where(np.logical_and(res['Ante_Fron_FEM_HPol_Atte_Second'][ant::15].astype('int') == 31, res['Ante_Fron_FEM_HPol_Atte_First'][ant::15].astype('int') == 31))
            for freq in range(tsys.shape[2]):
                avg_noise = []
                for index in state:
                    try:
                        if np.logical_and(tsys[ant, pol, freq, index] <= 0.005, index < tsys.shape[3]):
                            avg_noise.append(tsys[ant, pol, freq, index])
                    except:
                        pass
                freq_noise.append(np.average(avg_noise))
            pol_noise.append(freq_noise)
        noise_level.append(pol_noise)
    noise_level = np.array(noise_level)

    # Fall back to the calibration-file noise wherever no valid estimate was found
    for ant in range(tsys.shape[0]):
        for pol in range(tsys.shape[1]):
            for freq in range(tsys.shape[2]):
                if np.isnan(noise_level[ant, pol, freq]):
                    try:
                        noise_level[ant, pol, freq] = calfilenoise[ant, pol, freq]
                    except:
                        pass

    return tsys, res, noise_level, idx1, idx3, marker, trange_feattncal, trange_feattncal2
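
The noise estimate above averages tsys over the times when both front-end attenuators read 31 dB (maximum attenuation, so essentially only receiver noise reaches the detector), keeping only samples below a 0.005 threshold. The selection in isolation, on toy arrays:

import numpy as np

nt = 50
att1 = np.full(nt, 31); att1[20:30] = 0   # hypothetical first-attenuator record
att2 = np.full(nt, 31); att2[20:30] = 0   # hypothetical second-attenuator record
tsys = np.random.rand(nt) * 0.004         # toy power measurements

state, = np.where(np.logical_and(att1 == 31, att2 == 31))
samples = tsys[state]
noise_level = np.average(samples[samples <= 0.005])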
Example #26
def rd_calpnt(filename):
    ''' Read and return contents of output of CALPNT or CALPNT2M observation
        Note that the "x" offsets are dRA and dAZ.  To apply these in subsequent
        routines, dRA is converted to dHA by inverting the sign, while dAZ is 
        converted to dXEL (angle on the sky) by multiplying by cos(EL).
    '''
    import dbutil
    f = open(filename, 'r')
    lines = f.readlines()
    f.close()
    ants = lines[0].split('Ant ')[1:]
    antlist = []
    try:
        for ant in ants:
            antlist.append(int(ant[:2]))
    except:
        print 'Unrecognized format (header line) for', filename
        return None
    nants = len(ants)
    lines = lines[1:]
    nlines = len(lines)
    dra = np.zeros((nants, nlines), np.float)
    ddec = np.zeros((nants, nlines), np.float)
    ha = []
    dec = []
    source = []
    timstr = []
    for i, line in enumerate(lines):
        if len(line) < 9:
            dra = dra[:, :i]
            ddec = ddec[:, :i]
            break
        vals = line[9:].split()
        if len(vals) != nants * 2 + 4:
            print 'Error reading line', i + 2, 'of', filename
            dra = dra[:, :i]
            ddec = ddec[:, :i]
            break
        else:
            try:
                source.append(line[:8])
                timstr.append(vals[0] + ' ' + vals[1])
                ha.append(vals[2])
                dec.append(vals[3])
                dra[:, i] = np.array(vals[4::2]).astype(float)
                ddec[:, i] = np.array(vals[5::2]).astype(float)
            except:
                print 'Error parsing line', i + 2, 'of', filename
    # Convert HA, Dec from degrees to radians, and then convert HA to RA
    ha = np.array(ha).astype(float) * np.pi / 180.
    dec = np.array(dec).astype(float) * np.pi / 180.
    times = Time(timstr)
    ra = np.zeros_like(ha)
    for i in range(len(ha)):
        ra[i] = eovsa_lst(times[i]) - ha[i]

    # Read pointing parameters from SQL database at time of first observation
    params_old = np.zeros((9, 15), int)
    cursor = dbutil.get_cursor()
    timestamp = times[0].lv
    # Read stateframe data at time of first observation
    D15data = dbutil.get_dbrecs(cursor,
                                dimension=15,
                                timestamp=timestamp,
                                nrecs=1)
    for p in range(9):
        params_old[p], = D15data['Ante_Cont_PointingCoefficient' + str(p + 1)]
    params_old = params_old[:, np.array(antlist) -
                            1]  # Pare down to only antennas in antlist

    return {
        'filename': filename,
        'source': source,
        'time': times,
        'params_old': params_old,
        'ra': ra,
        'dec': dec,
        'ha': ha,
        'antlist': antlist,
        'dra': dra,
        'ddec': ddec
    }
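
The docstring notes the two conversions applied downstream: dRA becomes dHA by inverting the sign, and dAZ becomes dXEL (an angle on the sky) by multiplying by cos(EL). A one-line sketch of each, on toy values:

import numpy as np

dra = 0.05               # measured RA offset, degrees (toy value)
daz = 0.12               # measured AZ offset, degrees (toy value)
el = np.radians(40.)     # elevation of the observation

dha = -dra               # HA increases opposite to RA
dxel = daz * np.cos(el)  # cross-elevation angle on the sky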
Example #27
def flare_monitor(t):
    ''' Get all front-end power-detector voltages for the given day
        from the stateframe SQL database, and obtain the median of them, 
        to use as a flare monitor.
        
        Returns ut times in plot_date format and median voltages.
    '''
    import dbutil
    # timerange is 12 UT to 12 UT on next day, relative to the day in Time() object t
    trange = Time([int(t.mjd) + 12. / 24, int(t.mjd) + 36. / 24], format='mjd')
    tstart, tend = trange.lv.astype('str')
    cursor = dbutil.get_cursor()
    mjd = t.mjd
    verstr = dbutil.find_table_version(cursor, tstart)
    if verstr is None:
        print 'No stateframe table found for given time.'
        return tstart, [], {}
    query = 'select Timestamp,Ante_Fron_FEM_HPol_Voltage,Ante_Fron_FEM_VPol_Voltage from fV' + verstr + '_vD15 where timestamp between ' + tstart + ' and ' + tend + ' order by timestamp'
    data, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        print msg
        return tstart, [], {}
    for k, v in data.items():
        data[k].shape = (len(data[k]) / 15, 15)
    hv = []
    try:
        ut = Time(data['Timestamp'][:, 0].astype('float'),
                  format='lv').plot_date
    except:
        print 'Error for time', t.iso
        print 'Query:', query, ' returned msg:', msg
        print 'Keys:', data.keys()
        print data['Timestamp'][0, 0]
    hfac = np.median(data['Ante_Fron_FEM_HPol_Voltage'].astype('float'), 0)
    vfac = np.median(data['Ante_Fron_FEM_VPol_Voltage'].astype('float'), 0)
    for i in range(4):
        if hfac[i] > 0:
            hv.append(data['Ante_Fron_FEM_HPol_Voltage'][:, i] / hfac[i])
        if vfac[i] > 0:
            hv.append(data['Ante_Fron_FEM_VPol_Voltage'][:, i] / vfac[i])
    flm = np.median(np.array(hv), 0)
    good = np.where(abs(flm[1:] - flm[:-1]) < 0.01)[0]

    # Get the project IDs for scans during the period
    verstrh = dbutil.find_table_version(cursor, trange[0].lv, True)
    if verstrh is None:
        print 'No scan_header table found for given time.'
        return ut[good], flm[good], {}
    query = 'select Timestamp,Project from hV' + verstrh + '_vD1 where Timestamp between ' + tstart + ' and ' + tend + ' order by Timestamp'
    projdict, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        print msg
        return ut[good], flm[good], {}
    elif len(projdict) == 0:
        # No Project ID found, so return data and empty projdict dictionary
        print 'SQL Query was valid, but no Project data were found.'
        return ut[good], flm[good], {}
    projdict['Timestamp'] = projdict['Timestamp'].astype(
        'float')  # Convert timestamps from string to float
    for i in range(len(projdict['Project'])):
        projdict['Project'][i] = projdict['Project'][i].replace('\x00', '')

    # # Get the times when scanstate is -1
    # cursor.execute('select Timestamp,Sche_Data_ScanState from fV'+verstr+'_vD1 where Timestamp between '+tstart+' and '+tend+' and Sche_Data_ScanState = -1 order by Timestamp')
    # scan_off_times = np.transpose(np.array(cursor.fetchall()))[0]  # Just list of timestamps
    # if len(scan_off_times) > 2:
    #     gaps = scan_off_times[1:] - scan_off_times[:-1] - 1
    #     eos = np.where(gaps > 10)[0]
    #     if len(eos) > 1:
    #         if scan_off_times[eos[1]] < projdict['Timestamp'][0]:
    #             # Gaps are not lined up, so drop the first:
    #             eos = eos[1:]
    #     EOS = scan_off_times[eos]
    #     if scan_off_times[eos[0]] <= projdict['Timestamp'][0]:
    #         # First EOS is earlier than first Project ID, so make first Project ID None.
    #         projdict['Timestamp'] = np.append([scan_off_times[0]], projdict['Timestamp'])
    #         projdict['Project'] = np.append(['None'], projdict['Project'])
    #     if scan_off_times[eos[-1]+1] >= projdict['Timestamp'][-1]:
    #         # Last EOS is later than last Project ID, so make last Project ID None.
    #         projdict['Timestamp'] = np.append(projdict['Timestamp'], [scan_off_times[eos[-1]+1]])
    #         projdict['Project'] = np.append(projdict['Project'], ['None'])
    #         EOS = np.append(EOS, [scan_off_times[eos[-1]+1], scan_off_times[-1]])
    #     projdict.update({'EOS': EOS})
    # else:
    #     # Not enough scan changes to determine EOS (end-of-scan) times
    #     projdict.update({'EOS': []})
    # This turns out to be a more rational and "good-enough" approach to the end-of-scan problem.
    # The last scan, though, will be ignored...
    projdict.update({'EOS': projdict['Timestamp'][1:]})
    projdict.update({'Timestamp': projdict['Timestamp'][:-1]})
    projdict.update({'Project': projdict['Project'][:-1]})
    cursor.close()
    return ut[good], flm[good], projdict
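
The monitor normalizes each usable channel by its daily median and then medians across channels, so a real flare (seen by many channels at once) survives while a single-channel glitch or gain difference does not. A sketch on synthetic data:

import numpy as np

nt, nchan = 1000, 8
v = np.random.normal(1., 0.001, (nt, nchan))  # synthetic detector voltages
v[400:420] += 0.5                             # a "flare" seen by every channel
v[:, 3] *= 7.3                                # one channel with a different gain

fac = np.median(v, 0)                         # per-channel normalization factors
flm = np.median(v / fac, 1)                   # flare monitor vs. time
good = np.where(abs(flm[1:] - flm[:-1]) < 0.01)[0]  # drop step-like glitches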
Example #28
def DCM_master_attn_cal(update=False):
    ''' New version of this command, which uses the power values in
        the 10gbe packet headers instead of the very slow measurement
        of the ADC levels themselves.  This version only takes about 8 s!
        
        If update is True, it writes the results to the SQL database.
        
        Returns the DCM_master_table in the form of lines of text
        strings, with labels (handy for viewing).
    '''
    pwr = np.zeros((50,8,4),'int')
    # Capture on eth2 interface
    command = 'tcpdump -i eth2 -c 155000 -w /home/user/Python/dcm2.pcap -s 1000'
    p.sendcmd(command)
    # Capture on eth3 interface
    command = 'tcpdump -i eth3 -c 155000 -w /home/user/Python/dcm3.pcap -s 1000'
    p.sendcmd(command)
    headers = p.list_header('/home/user/Python/dcm2.pcap')
    for line in headers:
        try:
            j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
            pwr[j,id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    headers = p.list_header('/home/user/Python/dcm3.pcap')
    for line in headers:
        try:
            j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
            pwr[j,id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    # Reshape to (slot, nant, npol)
    pwr.shape = (50,16,2)
    # Read current frequency sequence from database
    cursor = db.get_cursor()
    query = 'select top 50 FSeqList from hV37_vD50 order by Timestamp desc'
    fseq, msg = db.do_query(cursor, query)
    if msg == 'Success':
        fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
        bandlist = ((np.array(fseqlist)-0.44)*2).astype(int)
    cursor.close()
    # Read current DCM_master_table from database
    xml, buf = ch.read_cal(2)
    orig_table = stf.extract(buf,xml['Attenuation'])
    # Order pwr values according to bandlist, taking median of any repeated values
    new_pwr = np.zeros((34,16,2))
    for i in range(34):
        idx, = np.where(bandlist-1 == i)
        if len(idx) > 0:
            new_pwr[i] = np.median(pwr[idx],0)
    new_pwr.shape = (34,32)
    # Now determine the change in attenuation needed to achieve a target
    # value of 1600.  Eliminate last two entries, corresponding to Ant16
    attn = np.log10(new_pwr[:,:-2]/1600.)*10.
    new_table = (np.clip(orig_table + attn,0,30)/2).astype(int)*2
    DCMlines = []
    DCMlines.append('#      Ant1  Ant2  Ant3  Ant4  Ant5  Ant6  Ant7  Ant8  Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15')
    DCMlines.append('#      X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y')
    DCMlines.append('#     ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----')
    for band in range(1,35):
        DCMlines.append('{:2} :  {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'.format(band,*new_table[band-1]))
    if update:
        msg = ch.dcm_master_table2sql(DCMlines)
        if msg:
            print 'Success'
        else:
            print 'Error writing table to SQL database!'
    return DCMlines
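
The adjustment above is attn = 10*log10(P/1600) dB added to the current table, then clipped to [0, 30] and quantized to the DCM's even 2-dB steps. The arithmetic on toy powers:

import numpy as np

target = 1600.
orig_table = np.array([10, 12, 14])    # current attenuations, dB (toy values)
pwr = np.array([3200., 1600., 400.])   # measured packet powers (toy values)

attn = np.log10(pwr / target) * 10.    # about +3, 0, -6 dB
new_table = (np.clip(orig_table + attn, 0, 30) / 2).astype(int) * 2
# -> array([12, 12, 6]): more attenuation where power is high, less where low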
Example #29
def get_fem_level(trange, dt=None):
    ''' Get FEM attenuation levels for a given timerange.  Returns a dictionary
        with keys as follows:

        times:     A Time object containing the array of times, size (nt)
        hlev:      The FEM attenuation level for HPol, size (nt, 15) 
        vlev:      The FEM attenuation level for VPol, size (nt, 15)
        dcmattn:   The base DCM attenuations for 34 bands x 15 antennas x 2 Poln, size (34,30)
                      The order is Ant1 H, Ant1 V, Ant2 H, Ant2 V, etc.
        dcmoff:    If DPPoffset-on is 0, this is None (meaning there are no changes to the
                      above base attenuations).  
                   If DPPoffset-on is 1, then dcmoff is a table of offsets to the 
                      base attenuation, size (nt, 50).  The offset applies to all 
                      antennas/polarizations.
                      
        Optional keywords:
           dt      Seconds between entries to read from SQL stateframe database. 
                     If omitted, 1 s is assumed.
        
    '''
    if dt is None:
        tstart, tend = [str(i) for i in trange.lv]
    else:
        # Expand time by 1/2 of dt before and after
        tstart = str(np.round(trange[0].lv - dt / 2))
        tend = str(np.round(trange[1].lv + dt / 2))
    cursor = db.get_cursor()
    ver = db.find_table_version(cursor, trange[0].lv)
    # Get front end attenuator states
    query = 'select Timestamp,Ante_Fron_FEM_Clockms,' \
            +'Ante_Fron_FEM_HPol_Regi_Level,Ante_Fron_FEM_VPol_Regi_Level from fV' \
            +ver+'_vD15 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        if dt:
            # If we want other than full cadence, get new array shapes and times
            n = len(data['Timestamp'])  # Original number of times
            new_n = (n / 15 / dt) * 15 * dt  # Truncated number of times equally divisible by dt
            new_shape = (n / 15 / dt, dt, 15)  # New shape of truncated arrays
            times = Time(data['Timestamp'][:new_n].astype('int')[::15 * dt],
                         format='lv')
        else:
            times = Time(data['Timestamp'].astype('int')[::15], format='lv')
        hlev = data['Ante_Fron_FEM_HPol_Regi_Level']
        vlev = data['Ante_Fron_FEM_VPol_Regi_Level']
        ms = data['Ante_Fron_FEM_Clockms']
        nt = len(hlev) / 15
        hlev.shape = (nt, 15)
        vlev.shape = (nt, 15)
        ms.shape = (nt, 15)
        # Find any entries for which Clockms is zero, which indicates where no
        # gain-state measurement is available.
        for i in range(15):
            bad, = np.where(ms[:, i] == 0)
            if bad.size != 0 and bad.size != nt:
                # Find nearest adjacent good value
                good, = np.where(ms[:, i] != 0)
                idx = nearest_val_idx(bad, good)
                hlev[bad, i] = hlev[good[idx], i]
                vlev[bad, i] = vlev[good[idx], i]
        if dt:
            # If we want other than full cadence, find mean over dt measurements
            hlev = np.mean(hlev[:new_n / 15].reshape(new_shape), 1)
            vlev = np.mean(vlev[:new_n / 15].reshape(new_shape), 1)
        # Put results in canonical order [nant, nt]
        hlev = hlev.T
        vlev = vlev.T
    else:
        print 'Error reading FEM levels:', msg
        return {}
    # Get back end attenuator states
    xml, buf = ch.read_cal(2, t=trange[0])
    dcmattn = stf.extract(buf, xml['Attenuation'])
    dcmattn.shape = (34, 15, 2)
    # Put into canonical order [nant, npol, nband]
    dcmattn = np.moveaxis(dcmattn, 0, 2)
    # See if DPP offset is enabled
    query = 'select Timestamp,DPPoffsetattn_on from fV' \
            +ver+'_vD1 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        dppon = data['DPPoffsetattn_on']
        if np.where(dppon > 0)[0].size == 0:
            dcm_off = None
        else:
            query = 'select Timestamp,DCMoffset_attn from fV' \
                    +ver+'_vD50 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
            data, msg = db.do_query(cursor, query)
            if msg == 'Success':
                otimes = Time(data['Timestamp'].astype('int')[::15],
                              format='lv')
                dcmoff = data['DCMoffset_attn']
                dcmoff.shape = (nt, 50)
                # We now have a time-history of offsets, at least some of which are non-zero.
                # Offsets by slot number do us no good, so we need to translate to band number.
                # Get fseqfile name at mean of timerange, from stateframe SQL database
                fseqfile = get_fseqfile(
                    Time(int(np.mean(trange.lv)), format='lv'))
                if fseqfile is None:
                    print 'Error: No active fseq file.'
                    dcm_off = None
                else:
                    # Get fseqfile from ACC and return bandlist
                    bandlist = fseqfile2bandlist(fseqfile)
                    # Use bandlist to convert nt x 50 array to nt x 34 band array of DCM attn offsets
                    # Note that this assumes DCM offset is the same for any multiply-sampled bands
                    # in the sequence.
                    dcm_off = np.zeros((nt, 34), float)
                    dcm_off[:, bandlist - 1] = dcmoff
                    # Put into canonical order [nband, nt]
                    dcm_off = dcm_off.T
                    if dt:
                        # If we want other than full cadence, find mean over dt measurements
                        new_nt = len(times)
                        dcm_off = dcm_off[:, :new_nt * dt]
                        dcm_off.shape = (34, dt, new_nt)
                        dcm_off = np.mean(dcm_off, 1)
            else:
                print 'Error reading DCM attenuations:', msg
                dcm_off = None
    else:
        print 'Error reading DPPon state:', msg
        dcm_off = None
    cursor.close()
    return {
        'times': times,
        'hlev': hlev.astype(int),
        'vlev': vlev.astype(int),
        'dcmattn': dcmattn,
        'dcmoff': dcm_off
    }
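
When dt is given, the routine truncates the record to a length divisible by 15*dt and averages dt consecutive samples with a reshape. The same trick in isolation (Python 2 integer division, as in the original):

import numpy as np

nant, dt = 15, 10
data = np.arange(15000.)        # one value per antenna per second (toy data)
n = len(data) / nant            # number of time samples
new_n = (n / dt) * dt * nant    # truncate to a multiple of nant*dt
means = data[:new_n].reshape(n / dt, dt, nant).mean(1)  # -> shape (100, 15)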
Example #30
def TPcal(x, y, calfac, offsun):
    ''' Writes Total Power calibration factors and offsun IF level
        to SQL database (caltype = 1)
    '''
    # *******
    # Version has to be updated at same time as xml description in cal_header.py
    # *******
    version = 1.0
    fghz = x['fghz']
    nf = len(fghz)
    tstamp = x['ut'][0]  # Start time of SOLPNTCAL
    dims = calfac.shape
    buf = ''
    if nf == 448:
        buf = struct.pack('d', tstamp)
        buf += struct.pack('d', version)
        # Case of 448 frequencies only
        # Array dimension for frequency list
        buf += struct.pack('I', 448)
        # Frequency list
        buf += struct.pack('448f', *fghz)
        # Polarization array (dimension, then two states--XX, YY)
        buf += struct.pack('Iii', *[2, -5, -6])
        # Array dimension for Antenna cluster (2.1 m ants only)
        buf += struct.pack('I', 13)
        # Empty array for filling in for missing antennas
        empty = np.zeros(448, 'float')
        for i in range(dims[2]):
            # Array dimensions for freq/poln for this antenna
            buf += struct.pack('2I', *[448, 2])
            # Cal factors for the two polarizations
            buf += struct.pack('448f', *calfac[0, :, i])
            buf += struct.pack('448f', *calfac[1, :, i])
            # Array dimensions for freq/poln for this antenna
            buf += struct.pack('2I', *[448, 2])
            # Offsun IF level for the two polarizations
            buf += struct.pack('448f', *offsun[0, :, i])
            buf += struct.pack('448f', *offsun[1, :, i])
        for i in range(dims[2], 13):
            # Same as above for missing antennas
            buf += struct.pack('2I', *[448, 2])
            buf += struct.pack('448f', *empty)
            buf += struct.pack('448f', *empty)
            buf += struct.pack('2I', *[448, 2])
            buf += struct.pack('448f', *empty)
            buf += struct.pack('448f', *empty)
        t = Time.now()
        timestamp = t.lv
        cursor = dbutil.get_cursor()
        cursor.execute(
            'insert into aBin (Timestamp,Version,Description,Bin) values (?,?,?,?)',
            timestamp, 1.0 + version / 10., 'Total Power Calibration',
            dbutil.stateframedef.pyodbc.Binary(buf))
        # *******
        #  NB! To retrieve these large binary data strings, one must declare text size on select, e.g.
        #         cursor.execute('set textsize 100000 select * from aBin where version = 1.1 ')
        #  where the given size is greater than the size desired.
        # *******

        # Temporarily store in disk file for checking format...
        #f = open('/tmp/tpcal.dat','wb')
        #f.write(buf)
        #f.close()

        cursor.commit()
        cursor.close()
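
The record packed above begins with two doubles (timestamp and version), a uint32 array dimension, and the 448 frequencies. A sketch of reading that header back with struct.unpack_from, using the byte offsets implied by the pack calls (read_tpcal_header is a hypothetical helper):

import struct

def read_tpcal_header(buf):
    # First 16 bytes: timestamp and version, both doubles
    tstamp, version = struct.unpack_from('2d', buf, 0)
    # Next 4 bytes: dimension of the frequency list (expected to be 448)
    nf, = struct.unpack_from('I', buf, 16)
    fghz = struct.unpack_from(str(nf) + 'f', buf, 20)
    return tstamp, version, nf, fghz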
Example #31
def offsets2ants(t,xoff,yoff,ant_str=None):
    ''' Given a start time (Time object) and a list of offsets output by sp_offsets()
        for 13 antennas, convert to pointing coefficients (multiply by 10000),
        add to coefficients listed in stateframe, and send to the relevant 
        antennas.  The antennas to update are specified with ant_str 
        (defaults to no antennas, for safety).        
    '''
    def send_cmds(cmds,acc):
        ''' Sends a series of commands to ACC.  The sequence of commands
            is not checked for validity!
            
            cmds   a list of strings, each of which must be a valid command
        '''
        import socket

        for cmd in cmds:
            #print 'Command:',cmd
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.connect((acc['host'],acc['scdport']))
                s.send(cmd)
                time.sleep(0.01)
                s.close()
            except:
                print 'Error: Could not send command',cmd,' to ACC.'
        return

    oldant = [8,9,10,12]
    if ant_str is None:
        print 'No antenna list specified, so there is nothing to do!'
        return

    try:
        timestamp = int(Time(t,format='mjd').lv)
    except:
        print 'Error interpreting time as Time() object'
        return
    from util import ant_str2list
    import dbutil as db
    import stateframe as stf
    accini = stf.rd_ACCfile()
    acc = {'host': accini['host'], 'scdport':accini['scdport']}
    antlist = ant_str2list(ant_str)
    if antlist is None:
        return
    cursor = db.get_cursor()
    # Read current stateframe data (as of 10 s ago)
    D15data = db.get_dbrecs(cursor,dimension=15,timestamp=timestamp,nrecs=1)
    p1_cur, = D15data['Ante_Cont_PointingCoefficient1']
    p7_cur, = D15data['Ante_Cont_PointingCoefficient7']
    
    for i in antlist:
        if i in oldant:
            # Change sign of RA offset to be HA, for old antennas (9, 10, 11 or 13)
            p1_inc = int(-xoff[i]*10000)
        else:
            p1_inc = int(xoff[i]*10000)
        p7_inc = int(yoff[i]*10000)
        p1_new = p1_cur[i] + p1_inc
        p7_new = p7_cur[i] + p7_inc
        print 'Updating P1 for Ant',i+1,'P1_old =',p1_cur[i],'P1_inc =',p1_inc,'P1_new =',p1_new
        cmd1 = 'pointingcoefficient1 '+str(p1_new)+' ant'+str(i+1)
        print 'Updating P7 for Ant',i+1,'P7_old =',p7_cur[i],'P7_inc =',p7_inc,'P7_new =',p7_new
        cmd7 = 'pointingcoefficient7 '+str(p7_new)+' ant'+str(i+1)
        print 'Commands to be sent:'
        print cmd1
        print cmd7
        send_cmds([cmd1],acc)
        send_cmds([cmd7],acc)
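
The update itself is simple arithmetic: degree offsets are scaled by 10000 into the controller's integer units and added to the current coefficients, with the x offset sign-flipped for the old equatorial antennas. A sketch (new_coefficients is a hypothetical helper):

oldant = [8, 9, 10, 12]   # zero-based indices of Ants 9, 10, 11, 13

def new_coefficients(i, p1_cur, p7_cur, xoff, yoff):
    sign = -1 if i in oldant else 1     # RA -> HA sign flip for old antennas
    p1_new = p1_cur + sign * int(xoff * 10000)
    p7_new = p7_cur + int(yoff * 10000)
    return p1_new, p7_new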
Example #32
def get_state_idx(trange, cycles=4, attenuator=1):
    # This program creates an array of shape (6, 4) which contains the
    #  times at which each antenna's attenuators are in each state:
    #  6 attenuation states (0, 1, 2, 4, 8, 16), 4 cycles.
    firstorsecond = ['First', 'Second']
    s = sp.Spectrogram(trange)
    s.docal = False
    s.dosub = False
    s.domedian = False
    cursor = dbutil.get_cursor()
    res, msg = dbutil.do_query(cursor,'select Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second from fV54_vD15 where Timestamp between '+str(trange[0].lv)+' and '+str(trange[1].lv))
    cursor.close()
    if msg == 'Success':
        antlist = []
        for i in [0, 1, 2, 4, 8, 16]:
            statelist = []
            for j in range(15):
                state, = np.where(np.logical_and(res['Ante_Fron_FEM_HPol_Atte_' + firstorsecond[attenuator]][j::15].astype('int') == i,res['Ante_Fron_FEM_HPol_Atte_' + firstorsecond[attenuator-1]][j::15].astype('int') != 0))
                statelist.append(state)
            statelist = np.array(statelist)
            antlist.append(statelist)
        states = np.array(antlist)
        states = np.rollaxis(states, 1)
        for i in range(15):
            for j in range(6):
                states[i, j] = res['Timestamp'][i::15][states[i, j]]
    else:
        print 'failure'
        return None
    time_array = (s.time.lv+0.001).astype('int')
    time_list = list(time_array)
    attns = ['0', '1', '2', '4', '8', '16']
    common_list = []
    for j in range(6):
        # Antenna 1 is used as the reference antenna here. 
        #  Earlier versions had only indices which were shared 
        #  for attenuations AND antennas, but because of a
        #  small timing error that occurs between antennas
        #  during the scan itself, this older version would
        #  sometimes fail. 
        i1, i2 = util.common_val_idx(time_array,states[0,j])
        if i1.shape == i2.shape:
            common_ant_list = i2
        else: 
            print 'There is a problem with the reference antenna at attenuation ' + attns[j]
        common_list.append(common_ant_list)
    
    final_indices = []
    final_indices1 = []
    for i in range(6):
        index_list = []
        for indxs in common_list[i]:
            try:
                index_list.append(time_list.index(states[0,i][indxs]))
            except:
                pass
        final_indices1.append(index_list)
    for i in range(6):
        indices_array = np.array(final_indices1[i])
        final_indices.append(indices_array)
    final_indices = np.array(final_indices) 

    rolled_indices = []
    for i in range(6):
        rolled = np.roll(final_indices[i], -1)
        rolled_indices.append(rolled)
    subtracted_list = []
    for j in range(6):
        subtracted_list.append(rolled_indices[j] - final_indices[j])
    break_lists = []
    for k in range(6):
        break_list = []
        for indx in range(subtracted_list[k].shape[0]):
            if np.absolute(subtracted_list[k][indx]) <= 2:
                break_list.append(indx)
            else: 
                break_list.append(-1)
        break_lists.append(break_list)
    for i in range(6):
        for indx in range(int(len(break_lists[i]))-1):
            try:
                if break_lists[i][indx] == break_lists[i][indx-1]:
                    break_lists[i].pop(indx)
            except:
                pass
    break_list = []
    for j in range(6):
        breaklist = np.array(break_lists[j])
        break_list.append(breaklist)
    break_spots = []
    for i in range(6):
        try:
            break_spot = []
            for indx in range(len(break_list[i])):
                if break_list[i][indx] == -1:
                    break_spot.append(indx)
            break_spots.append(break_spot)
        except:
            pass
    split_lists = []
    for k in range(6):
        steps_list = [break_list[k][0:break_spots[k][0]]]
        for j in range(cycles-1):
            try:
                steps_list.append(break_list[k][1 + break_spots[k][j]:break_spots[k][j+1]])
            except:
                pass            
        split_lists.append(steps_list)
    split_lists = np.array(split_lists)  
    final_grouped_indices = []
    for i in range(6):
        grouped_indices = []
        for j in range(cycles):
            try:
                indices_ = []
                for indxs in split_lists[i][j]:
                    indices_.append(rolled_indices[i][indxs])
                grouped_indices.append(indices_)
            except:
                pass
        final_grouped_indices.append(grouped_indices)
    final_grouped_indices = np.array(final_grouped_indices)
    for i in range(6):
        for j in range(cycles):
            try:
                for k in range(1,int(len(final_grouped_indices[i][j]))-1):
                    try:
                        for m in range(len(final_grouped_indices[i][j])):
                            if (final_grouped_indices[i][j][k-1] + 3) <= final_grouped_indices[i][j][k]:
                                final_grouped_indices[i][j].pop(k-1) 
                            if (final_grouped_indices[i][j][k+1]-3) >= final_grouped_indices[i][j][k]:
                                final_grouped_indices[i][j].pop(k+1)       
                    except:
                        pass
            except:
                pass
    return final_grouped_indices, res
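
Most of the break_list machinery above amounts to splitting a sorted index sequence into contiguous runs, one run per attenuation cycle, wherever the sequence jumps. numpy expresses that directly; a sketch on toy indices:

import numpy as np

idx = np.array([3, 4, 5, 6, 40, 41, 42, 80, 81, 82, 83])  # toy time indices
# Split wherever consecutive indices differ by more than 2
runs = np.split(idx, np.where(np.diff(idx) > 2)[0] + 1)
# -> [array([3, 4, 5, 6]), array([40, 41, 42]), array([80, 81, 82, 83])]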
Example #33
def DCM_attn_anal(filename):
    ''' Analyze a DCMATTNTEST observation to determine the 2- and 4-bit
        attenuation values.  Input is a Miriad file.  Returns two arrays, 
           at2 and at4 of size (nant,npol) = (13,2)
        representing the attenuation, in dB, of the 2- and 4-bit, resp.
    '''
    import read_idb as ri
    import dbutil as db
    import cal_header as ch
    import stateframe as stf
    import copy
    from util import Time
    import matplotlib.pylab as plt

    out = ri.read_idb([filename])
    ts = int(Time(out['time'][0], format='jd').lv + 0.5)
    te = int(Time(out['time'][-1], format='jd').lv + 0.5)
    query = 'select Timestamp,DCM_Offset_Attn from fV65_vD15 where Timestamp between ' + str(
        ts) + ' and ' + str(te) + ' order by Timestamp'
    cursor = db.get_cursor()
    data, msg = db.do_query(cursor, query)
    cursor.close()
    dcm_offset = data['DCM_Offset_Attn'].reshape(
        len(data['DCM_Offset_Attn']) / 15, 15)
    dcm_offset = dcm_offset[:, 0]  # All antennas are the same
    t = Time(out['time'][0], format='jd')
    xml, buf = ch.read_cal(2, t)
    table = stf.extract(buf, xml['Attenuation'])
    bandlist = ((out['fghz'] - 0.5) * 2).astype(int)
    tbl = table[bandlist - 1]
    tbl.shape = (len(bandlist), 15, 2)
    tbl = np.swapaxes(np.swapaxes(tbl, 0, -1), 0, 1)
    tbl2 = np.broadcast_to(tbl, (out['time'].shape[0], 15, 2, 134))
    tbl = copy.copy(np.rollaxis(tbl2, 0, 4))  # Shape (nant,npol,nf,nt)
    pwr = out['p'][:15]  # Shape (nant,npol,nf,nt)
    # Add value of dcm_offset to table
    for i, offset in enumerate(dcm_offset):
        tbl[:, :, :, i] += offset
    # Clip to valid attenuations
    tbl = np.clip(tbl, 0, 30)
    # Isolate good times in various attn states
    goodm2, = np.where(dcm_offset == -2)
    goodm2 = goodm2[2:-3]
    good2, = np.where(dcm_offset == 2)
    good2 = good2[2:-3]
    good0, = np.where(dcm_offset[goodm2[-1]:good2[0]] == 0)
    good0 += goodm2[-1]
    good0 = good0[2:-3]
    good4, = np.where(dcm_offset == 4)
    good4 = good4[2:-3]
    good6, = np.where(dcm_offset == 6)
    good6 = good6[2:-3]
    goodbg = good6 + 30  # Assumes FEMATTN 15 follows good6 30 s later
    # Perform median over good times and create pwrmed with medians
    # The 5 indexes correspond to dcm_offsets -2, 0, 2, 4 and 6
    nant, npol, nf, nt = pwr.shape
    pwrmed = np.zeros((nant, npol, nf, 5))
    # Do not forget to subtract the background
    bg = np.median(pwr[:, :, :, goodbg], 3)
    pwrmed[:, :, :, 0] = np.median(pwr[:, :, :, goodm2], 3) - bg
    pwrmed[:, :, :, 1] = np.median(pwr[:, :, :, good0], 3) - bg
    pwrmed[:, :, :, 2] = np.median(pwr[:, :, :, good2], 3) - bg
    pwrmed[:, :, :, 3] = np.median(pwr[:, :, :, good4], 3) - bg
    pwrmed[:, :, :, 4] = np.median(pwr[:, :, :, good6], 3) - bg
    good = np.array([goodm2[0], good0[0], good2[0], good4[0], good6[0]])
    tbl = tbl[:, :, :, good]
    at2 = np.zeros((13, 2), float)
    at4 = np.zeros((13, 2), float)
    at8 = np.zeros((13, 2), float)
    f1, ax1 = plt.subplots(2, 13)
    f2, ax2 = plt.subplots(2, 13)
    f3, ax3 = plt.subplots(2, 13)
    for ant in range(13):
        for pol in range(2):
            pts = []
            for i in range(4):
                for v in [0, 4, 8, 12, 16, 20, 24, 28]:
                    idx, = np.where(tbl[ant, pol, :, i] == v)
                    if len(idx) != 0:
                        good, = np.where((tbl[ant, pol, idx, i] +
                                          2) == tbl[ant, pol, idx, i + 1])
                        if len(good) != 0:
                            pts.append(pwrmed[ant, pol, idx[good], i] /
                                       pwrmed[ant, pol, idx[good], i + 1])
            pts = np.concatenate(pts)
            ax1[pol, ant].plot(pts, '.')
            ax1[pol, ant].set_ylim(0, 2)
            at2[ant, pol] = np.log10(np.median(pts)) * 10.
            pts = []
            for i in range(3):
                for v in [0, 2, 8, 10, 16, 18, 24, 26]:
                    idx, = np.where(tbl[ant, pol, :, i] == v)
                    if len(idx) != 0:
                        good, = np.where((tbl[ant, pol, idx, i] +
                                          4) == tbl[ant, pol, idx, i + 2])
                        if len(good) != 0:
                            pts.append(pwrmed[ant, pol, idx[good], i] /
                                       pwrmed[ant, pol, idx[good], i + 2])
            pts = np.concatenate(pts)
            ax2[pol, ant].plot(pts, '.')
            ax2[pol, ant].set_ylim(0, 3)
            at4[ant, pol] = np.log10(np.median(pts)) * 10.
            pts = []
            i = 0
            for v in [0, 2, 4, 6, 16, 18, 20, 22]:
                idx, = np.where(tbl[ant, pol, :, i] == v)
                if len(idx) != 0:
                    good, = np.where((tbl[ant, pol, idx, i] + 8) == tbl[ant,
                                                                        pol,
                                                                        idx,
                                                                        i + 4])
                    if len(good) != 0:
                        pts.append(pwrmed[ant, pol, idx[good], i] /
                                   pwrmed[ant, pol, idx[good], i + 4])
            try:
                pts = np.concatenate(pts)
            except:
                # Looks like there were no points for this antenna/polarization, so set to nominal attn
                pts = [6.30957, 6.30957, 6.30957]
            ax3[pol, ant].plot(pts, '.')
            ax3[pol, ant].set_ylim(5, 8)
            at8[ant, pol] = np.log10(np.median(pts)) * 10.
    plt.show()
    # Generate output table, a complex array of size (nant,npol,nbits)
    attn = np.zeros((16, 2, 4), np.complex)
    # Set to nominal values, then overwrite with measured ones
    for i in range(16):
        for j in range(2):
            attn[i, j] = [2.0 + 0j, 4.0 + 0j, 8.0 + 0j, 16.0 + 0j]
    attn[:13, :, 0] = at2 + 0j
    attn[:13, :, 1] = at4 + 0j
    attn[:13, :, 2] = at8 + 0j
    return attn
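
Each bit's attenuation is measured from the power ratio between two states that differ only in that bit: at_dB = 10*log10(P_low_attn / P_high_attn). For a single bit, with toy background-subtracted powers:

import numpy as np

p_ref = np.array([100., 101., 99.5])    # toy powers at the reference state
p_bit2 = np.array([63.0, 63.8, 62.5])   # same times with the 2-dB bit set

at2 = np.log10(np.median(p_ref / p_bit2)) * 10.  # measured 2-dB bit, ~2.0 dB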
Example #35
def gain_state(trange=None):
    ''' Read and assemble the gain state for the given timerange from 
        the SQL database, or for the last 10 minutes if trange is None.
        
        Returns the complex attenuation of the FEM for the timerange
        as an array of size (nant, npol, ntimes) [not band dependent],
        and the complex attenuation of the DCM for the same timerange
        as an array of size (nant, npol, nbands, ntimes).  Also returns
        the time as a Time() object array.
    '''
    from util import Time
    import dbutil as db
    from fem_attn_calib import fem_attn_update
    import cal_header as ch

    if trange is None:
        t = Time.now()
        t2 = Time(t.jd - 600. / 86400., format='jd')
        trange = Time([t2.iso, t.iso])
    ts = trange[0].lv  # Start timestamp
    te = trange[1].lv  # End timestamp
    cursor = db.get_cursor()
    # First get FEM attenuation for timerange
    D15dict = db.get_dbrecs(cursor, dimension=15, timestamp=trange)
    DCMoffdict = db.get_dbrecs(cursor, dimension=50, timestamp=trange)
    DCMoff_v_slot = DCMoffdict['DCMoffset_attn']
    #    DCMoff_0 = D15dict['DCM_Offset_Attn'][:,0]  # All ants are the same
    fem_attn = {}
    fem_attn['timestamp'] = D15dict['Timestamp'][:, 0]
    nt = len(fem_attn['timestamp'])
    junk = np.zeros([nt, 1], dtype='int')  #add the non-existing antenna 16
    fem_attn['h1'] = np.append(D15dict['Ante_Fron_FEM_HPol_Atte_First'],
                               junk,
                               axis=1)  #FEM hpol first attn value
    fem_attn['h2'] = np.append(D15dict['Ante_Fron_FEM_HPol_Atte_Second'],
                               junk,
                               axis=1)  #FEM hpol second attn value
    fem_attn['v1'] = np.append(D15dict['Ante_Fron_FEM_VPol_Atte_First'],
                               junk,
                               axis=1)  #FEM vpol first attn value
    fem_attn['v2'] = np.append(D15dict['Ante_Fron_FEM_VPol_Atte_Second'],
                               junk,
                               axis=1)  #FEM vpol second attn value
    fem_attn['ants'] = np.append(D15dict['I15'][0, :], [15])
    # Add corrections from SQL database for start time of timerange
    fem_attn_corr = fem_attn_update(fem_attn, trange[0])
    # Next get DCM attenuation for timerange
    # Getting next earlier scan header
    ver = db.find_table_version(cursor, ts, True)
    query = 'select top 50 Timestamp,FSeqList from hV' + ver + '_vD50 where Timestamp <= ' + str(
        ts) + ' order by Timestamp desc'
    fseq, msg = db.do_query(cursor, query)
    if msg == 'Success':
        fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
        bandlist = ((np.array(fseqlist) - 0.44) * 2).astype(int)
    cursor.close()
    # Read current DCM_table from database
    xml, buf = ch.read_cal(3, trange[0])
    orig_table = stf.extract(buf, xml['Attenuation']).astype('int')
    orig_table.shape = (50, 15, 2)
    xml, buf = ch.read_cal(6, trange[0])
    dcm_attn_bitv = np.nan_to_num(stf.extract(
        buf, xml['DCM_Attn_Real'])) + np.nan_to_num(
            stf.extract(buf, xml['DCM_Attn_Imag'])) * 1j
    #    # Add one more bit (all zeros) to take care of unit bit
    #    dcm_attn_bitv = np.concatenate((np.zeros((16,2,1),'int'),dcm_attn_bitv),axis=2)
    # We now have:
    #   orig_table     the original DCM at start of scan, size (nslot, nant=15, npol)
    #   DCMoff_0       the offset applied to all antennas and slots (ntimes)
    #   DCMoff_v_slot  the offset applied to all antennas but varies by slot (ntimes, nslot)
    #   dcm_attn_bitv  the measured (non-nominal) attenuations for each bit value (nant=16, npol, nbit) -- complex
    # Now I need to convert slot to band, add appropriately, and organize as (nant=16, npol, nband, ntimes)
    # Add one more antenna (all zeros) to orig_table
    orig_table = np.concatenate((orig_table, np.zeros((50, 1, 2), 'int')),
                                axis=1)
    ntimes, nslot = DCMoff_v_slot.shape
    dcm_attn = np.zeros((16, 2, 34, ntimes), np.int)
    for i in range(ntimes):
        for j in range(50):
            idx = bandlist[j] - 1
            # This adds attenuation for repeated bands--hopefully the same value for each repeat
            dcm_attn[:, :, idx, i] += orig_table[j, :, :] + DCMoff_v_slot[i, j]
    # Normalize repeated bands by finding number of repeats and dividing.
    for i in range(1, 35):
        n = len(np.where(bandlist == i)[0])
        if n > 1:
            dcm_attn[:, :, i - 1, :] /= n
    # Make sure attenuation is in range
    dcm_attn = np.clip(dcm_attn, 0, 30)
    # Finally, correct for non-nominal (measured) bit values
    # Start with 0 attenuation as reference
    dcm_attn_corr = dcm_attn * (0 + 0j)
    att = np.zeros((16, 2, 34, ntimes, 5), np.complex)
    # Calculate resulting attenuation based on bit attn values (2,4,8,16)
    for i in range(4):
        # Need dcm_attn_bitv[...,i] to be same shape as dcm_attn
        bigger_bitv = np.broadcast_to(dcm_attn_bitv[..., i],
                                      (ntimes, 34, 16, 2))
        bigger_bitv = np.swapaxes(
            np.swapaxes(np.swapaxes(bigger_bitv, 0, 3), 1, 2), 0, 1)
        att[..., i] = (np.bitwise_and(dcm_attn, 2**(i + 1)) >>
                       (i + 1)) * bigger_bitv
        dcm_attn_corr = dcm_attn_corr + att[..., i]

    # Move ntimes column to next to last position, and then sum over last column (the two attenuators)
    fem_attn_corr = np.sum(np.rollaxis(fem_attn_corr, 0, 3), 3)
    # Output is FEM shape (nant, npol, ntimes) = (16, 2, ntimes)
    #           DCM shape (nant, npol, nband, ntimes) = (16, 2, 34, ntimes)
    # Arrays are complex, in dB units
    tjd = Time(fem_attn['timestamp'].astype('int'), format='lv').jd
    return fem_attn_corr, dcm_attn_corr, tjd
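# A minimal usage sketch (hypothetical, not part of the original routine):
# combine the two returned corrections into a single linear power factor.
# fem_attn_corr (16, 2, ntimes) and dcm_attn_corr (16, 2, 34, ntimes) are the
# values returned above; the conversion assumes the real part is a power
# attenuation in dB.
#
#   total_dB = fem_attn_corr[:, :, np.newaxis, :] + dcm_attn_corr
#   lin_corr = 10**(total_dB.real / 10.)  # multiply raw powers by this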
Example #36
0
def tp_bgnd_all(tpdata):
    ''' Create time-variable background from ROACH inlet temperature
        This version is far superior to the earlier, crude version, but
        be aware that it works best for a long timerange of data, especially
        when there is a flare in the data.
        
        Inputs:
          tpdata   dictionary returned by read_idb()  NB: tpdata is not changed.
          
        Returns:
          bgnd     The background fluctuation array of size (nf,nt) to be 
                     subtracted from any antenna's total power (or mean of
                     antenna total powers)
    '''
    import dbutil as db
    from util import Time, nearest_val_idx, smooth  # smooth() is used below; assumed to live in util
    outfghz = tpdata['fghz']
    try:
        outtime = tpdata['time']
        tfmt = 'jd'
    except KeyError:
        outtime = tpdata['ut_mjd']
        tfmt = 'mjd'
    trange = Time(outtime[[0, -1]], format=tfmt)

    nt = len(outtime)
    if nt < 1200:
        print 'TP_BGND: Error, timebase too small.  Must have at least 1200 time samples.'
        return None
    nf = len(outfghz)
    outpd = Time(outtime, format=tfmt).plot_date  # use the same format the data came in
    cursor = db.get_cursor()
    data = db.get_dbrecs(cursor, dimension=8, timestamp=trange)
    pd = Time(data['Timestamp'][:, 0].astype(int), format='lv').plot_date
    inlet = data['Sche_Data_Roac_TempInlet']  # Inlet temperature variation
    sinlet = np.sum(inlet.astype(float), 1)
    # Eliminate 0 values in sinlet by replacing with nearest good value
    bad, = np.where(sinlet == 0)
    good, = np.where(sinlet != 0)
    idx = nearest_val_idx(bad, good)  # Locations of nearest good values to bad ones
    sinlet[bad] = sinlet[good[idx]]   # Overwrite bad values with good ones
    sinlet -= np.mean(sinlet)         # Remove offset, to provide zero-mean fluctuation
    sinlet = np.roll(sinlet, -110)    # Shift variation 110 s earlier (seems to be needed)
    # Interpolate sinlet values to the times in the data
    sint = np.interp(outpd, pd, sinlet)
    #    sint = np.roll(sint,-90)  # Shift phase of variation by 90 s earlier
    #    sint -= np.mean(sint)     # Remove offset, to provide zero-mean fluctuation
    sdev = np.std(sint)
    sint_ok = np.abs(sint) < 2 * sdev
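    # sint_ok flags times at which the interpolated inlet-temperature
    # fluctuation is within 2 sigma; only those times are trusted in the
    # per-frequency fits below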
    bgnd = np.zeros((13, 2, nf, nt), float)
    for ant in range(13):
        for pol in range(2):
            for i in range(nf):
                # Subtract smooth trend from data
                nt = len(tpdata['p'][ant, pol, i])
                wlen = min(nt, 2000)
                if wlen % 2 != 0:
                    wlen -= 1
                sig = tpdata['p'][ant, pol, i] - smooth(
                    tpdata['p'][ant, pol, i], wlen,
                    'blackman')[wlen / 2:-(wlen / 2 - 1)]
                # Eliminate the worst outliers and repeat
                stdev = np.nanstd(sig)
                good, = np.where(np.abs(sig) < 2 * stdev)
                if len(good) > nt * 0.1:
                    wlen = min(len(good), 2000)
                    if wlen % 2 != 0:
                        wlen -= 1
                    sig = tpdata['p'][ant, pol, i, good] - smooth(
                        tpdata['p'][ant, pol, i, good], wlen,
                        'blackman')[wlen / 2:-(wlen / 2 - 1)]
                    sint_i = sint[good]
                    stdev = np.std(sig)
                    # Final check for data quality
                    good, = np.where(
                        np.logical_and(sig < 2 * stdev, sint_ok[good]))
                    if len(good) > nt * 0.1:
                        p = np.polyfit(sint_i[good], sig[good], 1)
                    else:
                        p = [1., 0.]
                    # Apply correction for this frequency
                    bgnd[ant, pol, i] = sint * p[0] + p[1]
    return bgnd
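# A minimal usage sketch, assuming tpdata came from read_idb() and its total
# powers are in tpdata['p'] with shape (nant, npol, nf, nt); the names below
# are illustrative only.
#
#   bgnd = tp_bgnd_all(tpdata)
#   if bgnd is not None:
#       p_corr = tpdata['p'][:13] - bgnd  # background-subtracted total power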