Example #1
def get_projects(t, nosql=False):
    ''' Read all projects from SQL for the current date and return a summary
        as a dictionary with keys Timestamp, Project, and EOS (another timestamp)
    '''
    if nosql:
        return get_projects_nosql(t)
    import dbutil
    # timerange is 12 UT to 12 UT on next day, relative to the day in Time() object t
    trange = Time([int(t.mjd) + 12. / 24, int(t.mjd) + 36. / 24], format='mjd')
    tstart, tend = trange.lv.astype('str')
    cursor = dbutil.get_cursor()
    mjd = t.mjd
    # Get the project IDs for scans during the period
    verstrh = dbutil.find_table_version(cursor, trange[0].lv, True)
    if verstrh is None:
        print 'No scan_header table found for given time.'
        return {}
    query = 'select Timestamp,Project from hV' + verstrh + '_vD1 where Timestamp between ' + tstart + ' and ' + tend + ' order by Timestamp'
    projdict, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        print msg
        return {}
    elif len(projdict) == 0:
        # No Project ID found, so return data and empty projdict dictionary
        print 'SQL Query was valid, but no Project data were found.'
        return {}
    projdict['Timestamp'] = projdict['Timestamp'].astype(
        'float')  # Convert timestamps from string to float
    for i in range(len(projdict['Project'])):
        projdict['Project'][i] = projdict['Project'][i].replace('\x00', '')
    # Use the start of each subsequent scan as the end time (EOS) of the
    # previous one; the last scan is dropped since its end time is unknown.
    projdict.update({'EOS': projdict['Timestamp'][1:]})
    projdict.update({'Timestamp': projdict['Timestamp'][:-1]})
    projdict.update({'Project': projdict['Project'][:-1]})
    cursor.close()
    return projdict
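
# Hedged usage sketch (not part of the original module): assumes util.Time is
# importable and the stateframe SQL server is reachable through dbutil.
if __name__ == '__main__':
    from util import Time
    projdict = get_projects(Time('2019-08-01'))
    for ts, proj in zip(projdict.get('Timestamp', []), projdict.get('Project', [])):
        print Time(ts, format='lv').iso, proj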
Example #2
def read_cal(type,t=None):
    ''' Read the calibration data of the given type, for the given time (as a Time() object),
        or for the current time if None.
        
        Returns a dictionary of look-up information and a binary buffer containing the 
        calibration record.
    '''
    import dbutil, read_xml2, sys
    if t is None:
        t = util.Time.now()
    timestamp = int(t.lv)  # Given (or current) time as LabVIEW timestamp
    xmldict, ver = read_cal_xml(type, t)
    cursor = dbutil.get_cursor()

    if xmldict != {}:
        query = 'set textsize 2147483647 select top 1 * from abin where Version = '+str(type+ver/10.)+' and Timestamp <= '+str(timestamp)+' order by Timestamp desc'
        sqldict, msg = dbutil.do_query(cursor,query)
        cursor.close()
        if msg == 'Success':
            if sqldict == {}:
                print 'Error: Query returned no records.'
                print query
                return {}, None
            buf = sqldict['Bin'][0]   # Binary representation of data
            return xmldict, str(buf)
        else:
            # Note: the original printed typdict[type][0], but typdict is not
            # defined in this function, which would raise a NameError here.
            print 'Unknown error occurred reading calibration record for type',type
            print sys.exc_info()[1]
            return {}, None
    else:
        return {}, None
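
# Hedged usage sketch: read the calibration record for type 2 (assumed to be
# the DCM master table, per the other examples here) at a given time.
if __name__ == '__main__':
    from util import Time
    xmldict, buf = read_cal(2, Time('2019-08-01'))
    if buf is not None:
        print 'Read', len(buf), 'bytes of calibration data.'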
Example #3
def drop_deftable(version):
    ''' Drops ALL traces of a given version of a stateframe definition.
        Use with CAUTION!!!
        
        Requests confirmation from the keyboard.
    '''
    import dbutil as db
    cursor = db.get_cursor()
    # Get all table and view names from this version
    query = "select * from information_schema.tables where table_name like 'fV"+str(int(version))+"%'"
    result, msg = db.do_query(cursor, query)
    if msg == 'Success':
        names = result['TABLE_NAME']
        print 'You are about to permanently delete the following tables:'
        for name in names:
            print '    ',name
        ans = raw_input('Are you sure? [y/n]: ').upper()
        if ans != 'Y':
            print 'Action canceled by user'
            return False
        # Delete the version from the stateframedef table
        query = 'delete from StateFrameDef where Version='+str(int(version))
        r, msg = db.do_query(cursor, query)
        if msg != 'Success':
            print 'Error, could not delete from stateframedef for version:',version
            print msg
            return False
        # Loop over table and view names, dropping each in turn
        for name in names:
            query = 'drop table '+name
            r, msg = db.do_query(cursor, query)
            if msg != 'Success':
                print 'Error dropping table',name
                print msg
        # Drop Bin Reader procedure
        query = 'drop proc ov_fBinReader_V'+str(int(version))
        r, msg = db.do_query(cursor, query)
        if msg != 'Success':
            print 'Error, could not delete Bin Reader procedure for version:',version
            print msg
            return False
    else:
        return False
    print 'Successfully dropped all existing tables and table definition for version', version
    return True
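
# Hedged usage sketch: interactively drop all tables/views/procedures for a
# hypothetical stateframe definition version 66 (prompts for confirmation).
if __name__ == '__main__':
    print 'Dropped:', drop_deftable(66)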
Example #4
def write_cal(type,buf,t=None):
    ''' Write the calibration data provided in data buffer buf of the given type, 
        for the given time (as a Time() object), or for the current time if None.
        Typically, the time should refer to when the calibration data were taken,
        so the correct time object should be provided.
        
        The type keyword is a real number whose integer part indicates the type
        definition.  The fractional part must not be 0 (since this would indicate
        a type definition record rather than a data record).  The relevant type 
        definition is read from the database, and its total size is determined and 
        compared with the buffer size, as a sanity check.
        
        Returns True if success, or False if failure.
    '''
    import dbutil, read_xml2, sys
    if t is None:
        t = util.Time.now()
    timestamp = int(t.lv)  # Given (or current) time as LabVIEW timestamp
    typdict = cal_types()
    try:
        typinfo = typdict[int(type)]
    except KeyError:
        print 'Type',int(type),'not found in type definition dictionary.'
        return False
    cursor = dbutil.get_cursor()
    # Read type definition XML from abin table and do a sanity check
    query = 'select top 1 * from abin where Version = '+str(int(type))+'.0 and Timestamp <='+str(timestamp)+' order by Timestamp desc'
    outdict, msg = dbutil.do_query(cursor,query)
    if msg == 'Success':
        if len(outdict) == 0:
            # This type of xml file does not yet exist in the database, so indicate an error
            print 'Error: Type',type,'not defined in abin table.'
            cursor.close()
            return False
        else:
            # There is one, so read it and do a sanity check against binary data
            f = open('/tmp/tmp.xml','wb')
            f.write(outdict['Bin'][0])
            f.close()
            keys, mydict, fmt, ver = read_xml2.xml_read('/tmp/tmp.xml')
            binsize = get_size(fmt)
            if len(buf) == binsize:
                cursor.execute('insert into aBin (Timestamp,Version,Description,Bin) values (?,?,?,?)',
                   timestamp, type+ver/10., typinfo[0], dbutil.stateframedef.pyodbc.Binary(buf))
                cursor.commit()
                cursor.close()
                return True
            else:
                print 'Error: Size of buffer',len(buf),'does not match this calibration type.  Expecting',binsize
                cursor.close()
                return False
    else:
        # Query failed; close the cursor and signal failure explicitly
        # (the original fell through and implicitly returned None)
        print 'Error reading type definition:', msg
        cursor.close()
        return False
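
# Hedged round-trip sketch (type 2 and the date are placeholders): read an
# existing record and write it back under the same type and time.
if __name__ == '__main__':
    from util import Time
    t = Time('2019-08-01')
    xmldict, buf = read_cal(2, t)
    if buf is not None and write_cal(2, buf, t):
        print 'Calibration record rewritten successfully.'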
Example #5
def flare_monitor(t):
    ''' Get all front-end power-detector voltages for the given day
        from the stateframe SQL database, and obtain the median of them, 
        to use as a flare monitor.
        
        Returns ut times in plot_date format and median voltages.
    '''
    import dbutil
    # timerange is 12 UT to 12 UT on next day, relative to the day in Time() object t
    trange = Time([int(t.mjd) + 12. / 24, int(t.mjd) + 36. / 24], format='mjd')
    tstart, tend = trange.lv.astype('str')
    cursor = dbutil.get_cursor()
    mjd = t.mjd
    verstr = dbutil.find_table_version(cursor, tstart)
    if verstr is None:
        print 'No stateframe table found for given time.'
        return tstart, [], {}
    query = 'select Timestamp,Ante_Fron_FEM_HPol_Voltage,Ante_Fron_FEM_VPol_Voltage from fV' + verstr + '_vD15 where timestamp between ' + tstart + ' and ' + tend + ' order by timestamp'
    data, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        print msg
        return tstart, [], {}
    for k, v in data.items():
        data[k].shape = (len(data[k]) / 15, 15)
    hv = []
    try:
        ut = Time(data['Timestamp'][:, 0].astype('float'),
                  format='lv').plot_date
    except:
        print 'Error for time', t.iso
        print 'Query:', query, ' returned msg:', msg
        print 'Keys:', data.keys()
        print data['Timestamp'][0, 0]
        # Bail out here; continuing would raise a NameError on ut below
        return tstart, [], {}
    hfac = np.median(data['Ante_Fron_FEM_HPol_Voltage'].astype('float'), 0)
    vfac = np.median(data['Ante_Fron_FEM_VPol_Voltage'].astype('float'), 0)
    for i in range(4):
        if hfac[i] > 0:
            hv.append(data['Ante_Fron_FEM_HPol_Voltage'][:, i] / hfac[i])
        if vfac[i] > 0:
            hv.append(data['Ante_Fron_FEM_VPol_Voltage'][:, i] / vfac[i])
    flm = np.median(np.array(hv), 0)
    good = np.where(abs(flm[1:] - flm[:-1]) < 0.1)[0]

    cursor.close()
    projdict = get_projects(t)
    return ut[good], flm[good], projdict
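
# Hedged usage sketch: plot one day's median flare-monitor curve (assumes
# matplotlib and util.Time are available).
if __name__ == '__main__':
    import matplotlib.pylab as plt
    from util import Time
    ut, flm, projdict = flare_monitor(Time('2019-08-01'))
    if len(flm) > 0:
        plt.plot_date(ut, flm, '-')
        plt.show()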
Example #6
def get_median_wind(wthr):
    '''  Temporary work-around for mis-behaving weather station.
         Given the weather dictionary, query the SQL database for
         the last 120-s of wind data, and calculate median rather
         than average.  I hope this does not take too long!  Returns
         the same dictionary, with median replacing average wind.
    '''
    import dbutil as db
    cursor = db.get_cursor()
    query = 'select top 120 Timestamp,Sche_Data_Weat_Wind from fV66_vD1 order by Timestamp desc'
    data, msg = db.do_query(cursor,query)
    if msg == 'Success':
        try:
            medwind = np.median(data['Sche_Data_Weat_Wind'])
            wthr.update({'mt2MinRollAvgWindSpeed': medwind})
        except:
            # Leave the original rolling-average value in place if the query
            # returned no usable wind data
            pass
    cursor.close()
    return wthr
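
# Hedged usage sketch: wthr would normally come from the weather-station
# reader; a placeholder dictionary shows the median replacement here.
if __name__ == '__main__':
    wthr = {'mt2MinRollAvgWindSpeed': 0.0}
    print get_median_wind(wthr)['mt2MinRollAvgWindSpeed']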
Example #7
def get_fseqfile(t=None):
    if t is None:
        # 10 s ago...
        tlv = Time.now().lv - 10
    else: 
        tlv = int(t.lv)
    cursor = db.get_cursor()   # db is a module-level import (dbutil) in the original source
    ver = db.find_table_version(cursor,tlv)
    if ver is None:
        print 'Error: No stateframe table found for given time.'
        cursor.close()
        return None
    # Get the frequency-sequence filename for the 1-s window at the given time
    query = 'select Timestamp,LODM_LO1A_FSeqFile from fV'+ver+'_vD1 where Timestamp between '+str(tlv)+' and '+str(tlv+1)+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        fseqfile = data['LODM_LO1A_FSeqFile'][0].replace('\x00','')
        if fseqfile == 'none':
            fseqfile = None
    else:
        print 'Error: ',msg
        fseqfile = None
    cursor.close()
    return fseqfile
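
# Hedged usage sketch: report the frequency-sequence file in effect 10 s ago.
if __name__ == '__main__':
    print 'Current fseq file:', get_fseqfile()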
Example #8
def findscans(trange):
    '''Identify phasecal scans from UFDB files
    '''
    import dbutil
    import dump_tsys
    tstart, tend = trange.lv.astype(int).astype(str)
    cursor = dbutil.get_cursor()
    verstr = dbutil.find_table_version(cursor, tstart, True)
    query = "select Timestamp,Project,SourceID from hV"+verstr+"_vD1 where left(Project,8) = 'PHASECAL' and Timestamp between "+tstart+" and "+tend+" order by Timestamp"
    projdict, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        return {'msg':msg}
    if projdict == {}:
        return {'msg':'No PHASECAL scans for this day'}
    tsint = projdict['Timestamp'].astype(int)
    # Check UFDB file to get duration
    ufdb = dump_tsys.rd_ufdb(Time(int(tstart),format='lv'))
    mjd0 = int(Time(int(tstart),format='lv').mjd)
    mjdnow = int(Time.now().mjd)
    if mjd0 < mjdnow:
        # The date is a previous day, so read a second ufdb file 
        # to ensure we have the whole local day
        try:
            ufdb2 = dump_tsys.rd_ufdb(Time(int(tstart)+86400.,format='lv'))
            for key in ufdb.keys():
                ufdb.update({key: np.append(ufdb[key], ufdb2[key])})
        except:
            # No previous day, so just skip it.
            pass
    ufdb_times = ufdb['ST_TS'].astype(float).astype(int)
    idx = nearest_val_idx(tsint,ufdb_times)
    fpath = '/data1/eovsa/fits/UDB/' + trange[0].iso[:4] + '/'
    dur = []
    files = []   # Renamed from file, which shadows a builtin
    for i in idx:
        dur.append(((ufdb['EN_TS'].astype(float) - ufdb['ST_TS'].astype(float))[i])/60.)
        files.append(fpath+ufdb['FILE'][i])
    # Fix source ID to remove nulls
    srclist = np.array([str(i.replace('\x00','')) for i in projdict['SourceID']])
    return {'Timestamp': tsint, 'SourceID': srclist, 'duration': np.array(dur), 'filelist': np.array(files), 'msg': msg}
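
# Hedged usage sketch: list PHASECAL scans for one UT day (assumes util.Time
# is importable).
if __name__ == '__main__':
    from util import Time
    scans = findscans(Time(['2019-08-01 12:00', '2019-08-02 12:00']))
    if 'Timestamp' in scans:
        for src, dur in zip(scans['SourceID'], scans['duration']):
            print src, dur, 'min'
    else:
        print scans['msg']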
Example #9
def read_cal_xml(type,t=None):
    ''' Read the calibration type definition xml record of the given type, for the 
        given time (as a Time() object), or for the current time if None.
        
        Returns a dictionary of look-up information and its internal version.  A side-effect
        is that a file /tmp/type<n>.xml is created, where <n> is the type.
    '''
    import dbutil, read_xml2, sys
    if t is None:
        t = util.Time.now()
    timestamp = int(t.lv)  # Given (or current) time as LabVIEW timestamp
    typdict = cal_types()
    try:
        typinfo = typdict[type]
    except KeyError:
        print 'Type',type,'not found in type definition dictionary.'
        return {}, None
    cursor = dbutil.get_cursor()
    # Read type definition XML from abin table
    query = 'select top 1 * from abin where Version = '+str(type)+'.0 and Timestamp <='+str(timestamp)+' order by Timestamp desc'
    sqldict, msg = dbutil.do_query(cursor,query)
    if msg == 'Success':
        if len(sqldict) == 0:
            # This type of xml file does not yet exist in the database, so mark it for adding
            print 'Type',type,'not defined in abin table.'
            cursor.close()
            return {}, None
        else:
            # There is one, so read it and the corresponding binary data
            buf = sqldict['Bin'][0]   # Binary representation of xml file
            xmlfile = '/tmp/type'+str(type)+'.xml'
            f = open(xmlfile,'wb')
            f.write(buf)
            f.close()
            xmldict, thisver = read_xml2.xml_ptrs(xmlfile)
            cursor.close()
            return xmldict, thisver
    else:
        # Query failed; close the cursor and return the same failure signature
        # (the original fell through and implicitly returned None)
        print 'Error reading type definition:', msg
        cursor.close()
        return {}, None
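
# Hedged usage sketch: fetch the type-2 XML look-up dictionary and report its
# internal version.
if __name__ == '__main__':
    xmldict, ver = read_cal_xml(2)
    print 'Type 2 definition version:', ver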
Example #10
def send_xml2sql(type=None,t=None,test=False,nant=None,nfrq=None):
    ''' Routine to send any changed calibration xml definitions to the 
        SQL Server.  The latest definition (if any) for a given type is
        checked to see if the version matches.  If not, an update is 
        stored.  This routine will typically be run whenever a definition
        is added or changed.  If type is provided (i.e. not None), only 
        the given type will be updated (and only if its internal version 
        number has changed).
        
        The timestamp of the new record will be set according to the Time()
        object t, if provided, or the current time if not.
        
        As a debugging tool, if test is True, this routine goes through the
        motions but does not write to the abin table.
    '''
    import dbutil, read_xml2, sys
    if t is None:
        t = util.Time.now()
    timestamp = int(t.lv)  # Current time as LabVIEW timestamp
    cursor = dbutil.get_cursor()
    typdict = cal_types()
    if type:
        # If a particular type is specified, limit the action to that type
        typdict = {type:typdict[type]}
    for key in typdict.keys():
        #print 'Working on',typdict[key][0]
        # Execute the code to create the xml description for this key
        if key == 1:
            # Special case for TP calibration
            if nant is None or nfrq is None:
                print 'For',typdict[key][0],'values for both nant and nfrq are required.'
                cursor.close()
                return
            exec 'buf = '+typdict[key][1]+'(nant='+str(nant)+',nfrq='+str(nfrq)+')'
        else:
            exec 'buf = '+typdict[key][1]+'()'
        # Resulting buf must be written to a temporary file and reread
        # by xml_ptrs()
        f = open('/tmp/tmp.xml','wb')
        f.write(buf)
        f.close()
        mydict, xmlver = read_xml2.xml_ptrs('/tmp/tmp.xml')
        defn_version = float(key)+xmlver/10.  # Version number expected
        # Retrieve most recent key.0 record and check its version against the expected one
        query = 'select top 1 * from abin where Version = '+str(key)+'.0 and Timestamp <= '+str(timestamp)+' order by Timestamp desc'
        #print 'Executing query'
        outdict, msg = dbutil.do_query(cursor,query)
        #print msg
        if msg == 'Success':
            if len(outdict) == 0:
                # This type of xml file does not yet exist in the database, so mark it for adding
                add = True
            else:
                # There is one, so see if they differ
                buf2 = outdict['Bin'][0]   # Binary representation of xml file
                if buf == buf2:
                    # This description is already there so we will skip it
                    add = False
                else:
                    add = True
        else:
            # Some kind of error occurred, so print the message and skip adding the xml description
            #print 'Query',query,'resulted in error message:',msg
            add = False
        if add:
            # This is either new or updated, so add the xml description
            # to the database
            #print 'Trying to add',typdict[key][0]
            try:
                if test:
                    print 'Would have updated',typdict[key][0],'to version',defn_version
                else:
                    cursor.execute('insert into aBin (Timestamp,Version,Description,Bin) values (?,?,?,?)',
                                   timestamp, key, typdict[key][0], dbutil.stateframedef.pyodbc.Binary(buf))
                    print 'Type definition for',typdict[key][0],'successfully added/updated to version',defn_version,'--OK'
                    cursor.commit()
            except:
                print 'Unknown error occurred in adding',typdict[key][0]
                print sys.exc_info()[1]
        else:
            print 'Type definition for',typdict[key][0],'version',defn_version,'exists--OK'
    cursor.close()
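
# Hedged usage sketch: dry-run an update of the type-1 definition (TP
# calibration, per the docstring's special case); nant and nfrq are
# placeholder values.
if __name__ == '__main__':
    send_xml2sql(type=1, test=True, nant=16, nfrq=451)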
Example #11
def DCM_master_attn_cal(update=False):
    ''' New version of this command, which uses the power values in
        the 10gbe packet headers instead of the very slow measurement
        of the ADC levels themselves.  This version only takes about 8 s!
        
        If update is True, it writes the results to the SQL database.
        
        Returns the DCM_master_table in the form of lines of text
        strings, with labels (handy for viewing).
    '''
    # Note: p (packet-capture helper), db, ch, stf, and np are module-level
    # imports in the original source, not shown in this excerpt.
    pwr = np.zeros((50,8,4),'int')
    # Capture on eth2 interface
    command = 'tcpdump -i eth2 -c 155000 -w /home/user/Python/dcm2.pcap -s 1000'
    p.sendcmd(command)
    # Capture on eth3 interface
    command = 'tcpdump -i eth3 -c 155000 -w /home/user/Python/dcm3.pcap -s 1000'
    p.sendcmd(command)
    headers = p.list_header('/home/user/Python/dcm2.pcap')
    for line in headers:
        try:
            j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
            pwr[j,id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    headers = p.list_header('/home/user/Python/dcm3.pcap')
    for line in headers:
        try:
            j, id, p1,p2,p3,p4 = np.array(map(int,line.split()))[[0,3,6,7,8,9]]
            pwr[j,id] = (p1, p2, p3, p4)
        except:
            # This is to skip the non-data header lines in the list
            pass
    # Reshape to (slot, nant, npol)
    pwr.shape = (50,16,2)
    # Read current frequency sequence from database
    cursor = db.get_cursor()
    query = 'select top 50 FSeqList from hV37_vD50 order by Timestamp desc'
    fseq, msg = db.do_query(cursor, query)
    if msg == 'Success':
        fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
        bandlist = ((np.array(fseqlist)-0.44)*2).astype(int)
    cursor.close()
    # Read current DCM_master_table from database
    xml, buf = ch.read_cal(2)
    orig_table = stf.extract(buf,xml['Attenuation'])
    # Order pwr values according to bandlist, taking median of any repeated values
    new_pwr = np.zeros((34,16,2))
    for i in range(34):
        idx, = np.where(bandlist-1 == i)
        if len(idx) > 0:
            new_pwr[i] = np.median(pwr[idx],0)
    new_pwr.shape = (34,32)
    # Now determine the change in attenuation needed to achieve a target
    # value of 1600.  Eliminate last two entries, corresponding to Ant16
    attn = np.log10(new_pwr[:,:-2]/1600.)*10.
    new_table = (np.clip(orig_table + attn,0,30)/2).astype(int)*2
    DCMlines = []
    DCMlines.append('#      Ant1  Ant2  Ant3  Ant4  Ant5  Ant6  Ant7  Ant8  Ant9 Ant10 Ant11 Ant12 Ant13 Ant14 Ant15')
    DCMlines.append('#      X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y  X  Y')
    DCMlines.append('#     ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----')
    for band in range(1,35):
        DCMlines.append('{:2} :  {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2} {:2}'.format(band,*new_table[band-1]))
    if update:
        msg = ch.dcm_master_table2sql(DCMlines)
        if msg:
            print 'Success'
        else:
            print 'Error writing table to SQL database!'
    return DCMlines
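
# Hedged usage sketch: compute the new DCM master table without writing to
# SQL, and print it for inspection.
if __name__ == '__main__':
    for line in DCM_master_attn_cal(update=False):
        print line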
Example #12
def get_fem_level(trange, dt=None):
    ''' Get FEM attenuation levels for a given timerange.  Returns a dictionary
        with keys as follows:

        times:     A Time object containing the array of times, size (nt)
        hlev:      The FEM attenuation level for HPol, size (nt, 15) 
        vlev:      The FEM attenuation level for VPol, size (nt, 15)
        dcmattn:   The base DCM attenuations for 34 bands x 15 antennas x 2 Poln, size (34,30)
                      The order is Ant1 H, Ant1 V, Ant2 H, Ant2 V, etc.
        dcmoff:    If DPPoffset-on is 0, this is None (meaning there are no changes to the
                      above base attenuations).  
                   If DPPoffset-on is 1, then dcmoff is a table of offsets to the 
                      base attenuation, size (nt, 50).  The offset applies to all 
                      antennas/polarizations.
                      
        Optional keywords:
           dt      Seconds between entries to read from SQL stateframe database. 
                     If omitted, 1 s is assumed.
        
    '''
    if dt is None:
        tstart, tend = [str(i) for i in trange.lv]
    else:
        # Expand time by 1/2 of dt before and after
        tstart = str(np.round(trange[0].lv - dt / 2))
        tend = str(np.round(trange[1].lv + dt / 2))
    # db, ch, stf, Time, np, nearest_val_idx, and fseqfile2bandlist are
    # module-level imports/helpers in the original source, not shown here.
    cursor = db.get_cursor()
    ver = db.find_table_version(cursor, trange[0].lv)
    # Get front end attenuator states
    query = 'select Timestamp,Ante_Fron_FEM_Clockms,' \
            +'Ante_Fron_FEM_HPol_Regi_Level,Ante_Fron_FEM_VPol_Regi_Level from fV' \
            +ver+'_vD15 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        if dt:
            # If we want other than full cadence, get new array shapes and times
            n = len(data['Timestamp'])  # Original number of times
            new_n = (
                n / 15 / dt
            ) * 15 * dt  # Truncated number of times equally divisible by dt
            new_shape = (n / 15 / dt, dt, 15)  # New shape of truncated arrays
            times = Time(data['Timestamp'][:new_n].astype('int')[::15 * dt],
                         format='lv')
        else:
            times = Time(data['Timestamp'].astype('int')[::15], format='lv')
        hlev = data['Ante_Fron_FEM_HPol_Regi_Level']
        vlev = data['Ante_Fron_FEM_VPol_Regi_Level']
        ms = data['Ante_Fron_FEM_Clockms']
        nt = len(hlev) / 15
        hlev.shape = (nt, 15)
        vlev.shape = (nt, 15)
        ms.shape = (nt, 15)
        # Find any entries for which Clockms is zero, which indicates where no
        # gain-state measurement is available.
        for i in range(15):
            bad, = np.where(ms[:, i] == 0)
            if bad.size != 0 and bad.size != nt:
                # Find nearest adjacent good value
                good, = np.where(ms[:, i] != 0)
                idx = nearest_val_idx(bad, good)
                hlev[bad, i] = hlev[good[idx], i]
                vlev[bad, i] = vlev[good[idx], i]
        if dt:
            # If we want other than full cadence, find mean over dt measurements
            hlev = np.mean(hlev[:new_n / 15].reshape(new_shape), 1)
            vlev = np.mean(vlev[:new_n / 15].reshape(new_shape), 1)
        # Put results in canonical order [nant, nt]
        hlev = hlev.T
        vlev = vlev.T
    else:
        print 'Error reading FEM levels:', msg
        return {}
    # Get back end attenuator states
    xml, buf = ch.read_cal(2, t=trange[0])
    dcmattn = stf.extract(buf, xml['Attenuation'])
    dcmattn.shape = (34, 15, 2)
    # Put into canonical order [nant, npol, nband]
    dcmattn = np.moveaxis(dcmattn, 0, 2)
    # See if DPP offset is enabled
    query = 'select Timestamp,DPPoffsetattn_on from fV' \
            +ver+'_vD1 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        dppon = data['DPPoffsetattn_on']
        if np.where(dppon > 0)[0].size == 0:
            dcm_off = None
        else:
            query = 'select Timestamp,DCMoffset_attn from fV' \
                    +ver+'_vD50 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
            data, msg = db.do_query(cursor, query)
            if msg == 'Success':
                otimes = Time(data['Timestamp'].astype('int')[::15],
                              format='lv')
                dcmoff = data['DCMoffset_attn']
                dcmoff.shape = (nt, 50)
                # We now have a time-history of offsets, at least some of which are non-zero.
                # Offsets by slot number do us no good, so we need to translate to band number.
                # Get fseqfile name at mean of timerange, from stateframe SQL database
                fseqfile = get_fseqfile(
                    Time(int(np.mean(trange.lv)), format='lv'))
                if fseqfile is None:
                    print 'Error: No active fseq file.'
                    dcm_off = None
                else:
                    # Get fseqfile from ACC and return bandlist
                    bandlist = fseqfile2bandlist(fseqfile)
                    # Use bandlist to convert nt x 50 array to nt x 34 band array of DCM attn offsets
                    # Note that this assumes DCM offset is the same for any multiply-sampled bands
                    # in the sequence.
                    dcm_off = np.zeros((nt, 34), float)
                    dcm_off[:, bandlist - 1] = dcmoff
                    # Put into canonical order [nband, nt]
                    dcm_off = dcm_off.T
                    if dt:
                        # If we want other than full cadence, find mean over dt measurements
                        new_nt = len(times)
                        dcm_off = dcm_off[:, :new_nt * dt]
                        dcm_off.shape = (34, dt, new_nt)
                        dcm_off = np.mean(dcm_off, 1)
            else:
                print 'Error reading DCM attenuations:', msg
                dcm_off = None
    else:
        print 'Error reading DPPon state:', msg
        dcm_off = None
    cursor.close()
    return {
        'times': times,
        'hlev': hlev.astype(int),
        'vlev': vlev.astype(int),
        'dcmattn': dcmattn,
        'dcmoff': dcm_off
    }
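
# Hedged usage sketch: read FEM levels at 10-s cadence over one hour (assumes
# util.Time is importable).
if __name__ == '__main__':
    from util import Time
    out = get_fem_level(Time(['2019-08-01 18:00', '2019-08-01 19:00']), dt=10)
    if out:
        print 'hlev shape:', out['hlev'].shape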
Example #13
from util import Time
import dbutil as db
import numpy as np
from matplotlib.pylab import subplots

# A cursor is needed for the queries below (missing in the original snippet)
cursor = db.get_cursor()

hcurve = []
vcurve = []
t = Time([
    '2015-10-28 6:17', '2015-10-28 6:20', '2015-10-28 6:23', '2015-10-28 6:30',
    '2015-10-28 6:33', '2015-10-28 6:36', '2015-10-28 6:39'
]).lv.astype('int')
for lv in t:
    query = 'select Timestamp,Ante_Fron_FEM_HPol_Voltage from fv61_vD15 where (I15 % 15) = 13 and Timestamp between ' + str(
        lv - 90) + ' and ' + str(lv + 90) + ' order by Timestamp'
    hc, msg = db.do_query(cursor, query)
    hcurve.append(hc)
    query = 'select Timestamp,Ante_Fron_FEM_VPol_Voltage from fv61_vD15 where (I15 % 15) = 13 and Timestamp between ' + str(
        lv - 90) + ' and ' + str(lv + 90) + ' order by Timestamp'
    vc, msg = db.do_query(cursor, query)
    vcurve.append(vc)

f, ax = subplots(2, 1)
hlabel = ['175 mm', '150 mm', '125 mm', '100 mm', '75 mm', '50 mm', '25 mm']
for i, h in enumerate(hcurve):
    x = (h['Timestamp'] - t[i]) * np.cos(13 * np.pi / 180)
    ax[0].plot(x, h['Ante_Fron_FEM_HPol_Voltage'], label=hlabel[i])
for i, v in enumerate(vcurve):
    x = (v['Timestamp'] - t[i]) * np.cos(13 * np.pi / 180)
    ax[1].plot(x, v['Ante_Fron_FEM_VPol_Voltage'], label=hlabel[i])
ax[0].set_ylim(1.2, 2.2)
ax[0].legend(fontsize=10)
ax[0].set_xlim(-50, 50)
Example #14
def send_xml2sql():
    ''' Routine to send any changed calibration xml definitions to the 
        SQL Server.  The latest definition (if any) for a given type is
        checked to see if the version matches.  If not, an update is 
        stored.  This routine will typically be run whenever a definition
        is added or changed.
    '''
    import dbutil, read_xml2, sys
    t = util.Time.now()
    timestamp = t.lv  # Current time as LabVIEW timestamp
    cursor = dbutil.get_cursor()
    typdict = cal_types()
    for key in typdict.keys():
        #print 'Working on',typdict[key][0]
        # Execute the code to create the xml description for this key
        exec 'buf = '+typdict[key][1]+'()'
        # Resulting buf must be written to a temporary file and reread
        # by xml_ptrs()
        f = open('/tmp/tmp.xml','wb')
        f.write(buf)
        f.close()
        mydict, xmlver = read_xml2.xml_ptrs('/tmp/tmp.xml')
        defn_version = float(key)+xmlver/10.  # Version number expected
        # Retrieve most recent key.0 record and check its version against the expected one
        query = 'select top 1 * from abin where Version = '+str(key)+'.0 order by Timestamp desc'
        #print 'Executing query'
        outdict, msg = dbutil.do_query(cursor,query)
        #print msg
        if msg == 'Success':
            if len(outdict) == 0:
                # This type of xml file does not yet exist in the database, so mark it for adding
                add = True
            else:
                # There is one, so see if it agrees with the version.  Use a
                # separate name for the existing record so buf (the new
                # definition) is not clobbered before the insert below.
                oldbuf = outdict['Bin'][0]   # Binary representation of existing xml file
                f = open('/tmp/tmp.xml','wb')
                f.write(oldbuf)
                f.close()
                mydict, thisver = read_xml2.xml_ptrs('/tmp/tmp.xml')
                #print 'Versions:',float(key)+thisver/10.,defn_version
                if (float(key)+thisver/10.) == defn_version:
                    # This description is already there so we will skip it
                    add = False
                else:
                    add = True
        else:
            # Some kind of error occurred, so print the message and skip adding the xml description
            #print 'Query',query,'resulted in error message:',msg
            add = False
        if add:
            # This is either new or updated, so add the xml description
            # to the database
            #print 'Trying to add',typdict[key][0]
            try:
                cursor.execute('insert into aBin (Timestamp,Version,Description,Bin) values (?,?,?,?)',
                               timestamp, float(key), typdict[key][0], dbutil.stateframedef.pyodbc.Binary(buf))
                print typdict[key][0],'successfully added/updated to version',defn_version
            except:
                print 'Unknown error occurred in adding',typdict[key][0]
                print sys.exc_info()[1]
        else:
            print typdict[key][0],'version',defn_version,'already exists--not updated'
    cursor.commit()
    cursor.close()
Example #16
def DCM_attn_anal(filename):
    ''' Analyze a DCMATTNTEST observation to determine the 2- and 4-bit
        attenuation values.  Input is a Miriad file.  Returns two arrays, 
           at2 and at4 of size (nant,npol) = (13,2)
        representing the attenuation, in dB, of the 2- and 4-bit, resp.
    '''
    import read_idb as ri
    import dbutil as db
    import cal_header as ch
    import stateframe as stf
    import copy
    import numpy as np
    from util import Time
    import matplotlib.pylab as plt

    out = ri.read_idb([filename])
    ts = int(Time(out['time'][0], format='jd').lv + 0.5)
    te = int(Time(out['time'][-1], format='jd').lv + 0.5)
    query = 'select Timestamp,DCM_Offset_Attn from fV65_vD15 where Timestamp between ' + str(
        ts) + ' and ' + str(te) + ' order by Timestamp'
    cursor = db.get_cursor()
    data, msg = db.do_query(cursor, query)
    cursor.close()
    dcm_offset = data['DCM_Offset_Attn'].reshape(
        len(data['DCM_Offset_Attn']) / 15, 15)
    dcm_offset = dcm_offset[:, 0]  # All antennas are the same
    t = Time(out['time'][0], format='jd')
    xml, buf = ch.read_cal(2, t)
    table = stf.extract(buf, xml['Attenuation'])
    bandlist = ((out['fghz'] - 0.5) * 2).astype(int)
    tbl = table[bandlist - 1]
    tbl.shape = (len(bandlist), 15, 2)
    tbl = np.swapaxes(np.swapaxes(tbl, 0, -1), 0, 1)
    tbl2 = np.broadcast_to(tbl, (out['time'].shape[0], 15, 2, 134))
    tbl = copy.copy(np.rollaxis(tbl2, 0, 4))  # Shape (nant,npol,nf,nt)
    pwr = out['p'][:15]  # Shape (nant,npol,nf,nt)
    # Add value of dcm_offset to table
    for i, offset in enumerate(dcm_offset):
        tbl[:, :, :, i] += offset
    # Clip to valid attenuations
    tbl = np.clip(tbl, 0, 30)
    # Isolate good times in various attn states
    goodm2, = np.where(dcm_offset == -2)
    goodm2 = goodm2[2:-3]
    good2, = np.where(dcm_offset == 2)
    good2 = good2[2:-3]
    good0, = np.where(dcm_offset[goodm2[-1]:good2[0]] == 0)
    good0 += goodm2[-1]
    good0 = good0[2:-3]
    good4, = np.where(dcm_offset == 4)
    good4 = good4[2:-3]
    good6, = np.where(dcm_offset == 6)
    good6 = good6[2:-3]
    goodbg = good6 + 30  # Assumes FEMATTN 15 follows good6 30 s later
    # Perform median over good times and create pwrmed with medians
    # The 5 indexes correspond to dcm_offsets -2, 0, 2, 4 and 6
    nant, npol, nf, nt = pwr.shape
    pwrmed = np.zeros((nant, npol, nf, 5))
    # Do not forget to subtract the background
    bg = np.median(pwr[:, :, :, goodbg], 3)
    pwrmed[:, :, :, 0] = np.median(pwr[:, :, :, goodm2], 3) - bg
    pwrmed[:, :, :, 1] = np.median(pwr[:, :, :, good0], 3) - bg
    pwrmed[:, :, :, 2] = np.median(pwr[:, :, :, good2], 3) - bg
    pwrmed[:, :, :, 3] = np.median(pwr[:, :, :, good4], 3) - bg
    pwrmed[:, :, :, 4] = np.median(pwr[:, :, :, good6], 3) - bg
    good = np.array([goodm2[0], good0[0], good2[0], good4[0], good6[0]])
    tbl = tbl[:, :, :, good]
    at2 = np.zeros((13, 2), float)
    at4 = np.zeros((13, 2), float)
    at8 = np.zeros((13, 2), float)
    f1, ax1 = plt.subplots(2, 13)
    f2, ax2 = plt.subplots(2, 13)
    f3, ax3 = plt.subplots(2, 13)
    for ant in range(13):
        for pol in range(2):
            pts = []
            for i in range(4):
                for v in [0, 4, 8, 12, 16, 20, 24, 28]:
                    idx, = np.where(tbl[ant, pol, :, i] == v)
                    if len(idx) != 0:
                        good, = np.where((tbl[ant, pol, idx, i] +
                                          2) == tbl[ant, pol, idx, i + 1])
                        if len(good) != 0:
                            pts.append(pwrmed[ant, pol, idx[good], i] /
                                       pwrmed[ant, pol, idx[good], i + 1])
            pts = np.concatenate(pts)
            ax1[pol, ant].plot(pts, '.')
            ax1[pol, ant].set_ylim(0, 2)
            at2[ant, pol] = np.log10(np.median(pts)) * 10.
            pts = []
            for i in range(3):
                for v in [0, 2, 8, 10, 16, 18, 24, 26]:
                    idx, = np.where(tbl[ant, pol, :, i] == v)
                    if len(idx) != 0:
                        good, = np.where((tbl[ant, pol, idx, i] +
                                          4) == tbl[ant, pol, idx, i + 2])
                        if len(good) != 0:
                            pts.append(pwrmed[ant, pol, idx[good], i] /
                                       pwrmed[ant, pol, idx[good], i + 2])
            pts = np.concatenate(pts)
            ax2[pol, ant].plot(pts, '.')
            ax2[pol, ant].set_ylim(0, 3)
            at4[ant, pol] = np.log10(np.median(pts)) * 10.
            pts = []
            i = 0
            for v in [0, 2, 4, 6, 16, 18, 20, 22]:
                idx, = np.where(tbl[ant, pol, :, i] == v)
                if len(idx) != 0:
                    good, = np.where((tbl[ant, pol, idx, i] + 8) == tbl[ant,
                                                                        pol,
                                                                        idx,
                                                                        i + 4])
                    if len(good) != 0:
                        pts.append(pwrmed[ant, pol, idx[good], i] /
                                   pwrmed[ant, pol, idx[good], i + 4])
            try:
                pts = np.concatenate(pts)
            except:
                # Looks like there were no points for this antenna/polarization, so set to nominal attn
                pts = [6.30957, 6.30957, 6.30957]
            ax3[pol, ant].plot(pts, '.')
            ax3[pol, ant].set_ylim(5, 8)
            at8[ant, pol] = np.log10(np.median(pts)) * 10.
    plt.show()
    # Generate output table, a complex array of size (nant,npol,nbits)
    attn = np.zeros((16, 2, 4), np.complex)
    # Set to nominal values, then overwrite with measured ones
    for i in range(16):
        for j in range(2):
            attn[i, j] = [2.0 + 0j, 4.0 + 0j, 8.0 + 0j, 16.0 + 0j]
    attn[:13, :, 0] = at2 + 0j
    attn[:13, :, 1] = at4 + 0j
    attn[:13, :, 2] = at8 + 0j
    return attn
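
# Hedged usage sketch: the Miriad filename below is a placeholder for a real
# DCMATTNTEST observation.
if __name__ == '__main__':
    attn = DCM_attn_anal('/data1/eovsa/IDB/IDB20190801180000')
    print 'Measured 2-bit attenuations (dB):'
    print attn[:13, :, 0].real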
Example #17
def gain_state(trange=None):
    ''' Read and assemble the gain state for the given timerange from 
        the SQL database, or for the last 10 minutes if trange is None.
        
        Returns the complex attenuation of the FEM for the timerange
        as an array of size (nant, npol, ntimes) [not band dependent],
        and the complex attenuation of the DCM for the same timerange
        as an array of size (nant, npol, nbands, ntimes).  Also returns
        the time as a Time() object array.
    '''
    from util import Time
    import dbutil as db
    import numpy as np
    import stateframe as stf
    from fem_attn_calib import fem_attn_update
    import cal_header as ch

    if trange is None:
        t = Time.now()
        t2 = Time(t.jd - 600. / 86400., format='jd')
        trange = Time([t2.iso, t.iso])
    ts = trange[0].lv  # Start timestamp
    te = trange[1].lv  # End timestamp
    cursor = db.get_cursor()
    # First get FEM attenuation for timerange
    D15dict = db.get_dbrecs(cursor, dimension=15, timestamp=trange)
    DCMoffdict = db.get_dbrecs(cursor, dimension=50, timestamp=trange)
    DCMoff_v_slot = DCMoffdict['DCMoffset_attn']
    #    DCMoff_0 = D15dict['DCM_Offset_Attn'][:,0]  # All ants are the same
    fem_attn = {}
    fem_attn['timestamp'] = D15dict['Timestamp'][:, 0]
    nt = len(fem_attn['timestamp'])
    junk = np.zeros([nt, 1], dtype='int')  #add the non-existing antenna 16
    fem_attn['h1'] = np.append(D15dict['Ante_Fron_FEM_HPol_Atte_First'],
                               junk,
                               axis=1)  #FEM hpol first attn value
    fem_attn['h2'] = np.append(D15dict['Ante_Fron_FEM_HPol_Atte_Second'],
                               junk,
                               axis=1)  #FEM hpol second attn value
    fem_attn['v1'] = np.append(D15dict['Ante_Fron_FEM_VPol_Atte_First'],
                               junk,
                               axis=1)  #FEM vpol first attn value
    fem_attn['v2'] = np.append(D15dict['Ante_Fron_FEM_VPol_Atte_Second'],
                               junk,
                               axis=1)  #FEM vpol second attn value
    fem_attn['ants'] = np.append(D15dict['I15'][0, :], [15])
    # Add corrections from SQL database for start time of timerange
    fem_attn_corr = fem_attn_update(fem_attn, trange[0])
    # Next get DCM attenuation for timerange
    # Getting next earlier scan header
    ver = db.find_table_version(cursor, ts, True)
    query = 'select top 50 Timestamp,FSeqList from hV' + ver + '_vD50 where Timestamp <= ' + str(
        ts) + ' order by Timestamp desc'
    fseq, msg = db.do_query(cursor, query)
    if msg == 'Success':
        fseqlist = fseq['FSeqList'][::-1]  # Reverse the order
        bandlist = ((np.array(fseqlist) - 0.44) * 2).astype(int)
    cursor.close()
    # Read current DCM_table from database
    xml, buf = ch.read_cal(3, trange[0])
    orig_table = stf.extract(buf, xml['Attenuation']).astype('int')
    orig_table.shape = (50, 15, 2)
    xml, buf = ch.read_cal(6, trange[0])
    dcm_attn_bitv = np.nan_to_num(stf.extract(
        buf, xml['DCM_Attn_Real'])) + np.nan_to_num(
            stf.extract(buf, xml['DCM_Attn_Imag'])) * 1j
    #    # Add one more bit (all zeros) to take care of unit bit
    #    dcm_attn_bitv = np.concatenate((np.zeros((16,2,1),'int'),dcm_attn_bitv),axis=2)
    # We now have:
    #   orig_table     the original DCM at start of scan, size (nslot, nant=15, npol)
    #   DCMoff_0       the offset applied to all antennas and slots (ntimes)
    #   DCMoff_v_slot  the offset applied to all antennas but varies by slot (ntimes, nslot)
    #   dcm_attn_bitv  the measured (non-nominal) attenuations for each bit value (nant=16, npol, nbit) -- complex
    # Now I need to convert slot to band, add appropriately, and organize as (nant=16, npol, nband, ntimes)
    # Add one more antenna (all zeros) to orig_table
    orig_table = np.concatenate((orig_table, np.zeros((50, 1, 2), 'int')),
                                axis=1)
    ntimes, nslot = DCMoff_v_slot.shape
    dcm_attn = np.zeros((16, 2, 34, ntimes), np.int)
    for i in range(ntimes):
        for j in range(50):
            idx = bandlist[j] - 1
            # This adds attenuation for repeated bands--hopefully the same value for each repeat
            dcm_attn[:, :, idx, i] += orig_table[j, :, :] + DCMoff_v_slot[i, j]
    # Normalize repeated bands by finding number of repeats and dividing.
    for i in range(1, 35):
        n = len(np.where(bandlist == i)[0])
        if n > 1:
            dcm_attn[:, :, i - 1, :] /= n
    # Make sure attenuation is in range
    dcm_attn = np.clip(dcm_attn, 0, 30)
    # Finally, correct for non-nominal (measured) bit values
    # Start with 0 attenuation as reference
    dcm_attn_corr = dcm_attn * (0 + 0j)
    att = np.zeros((16, 2, 34, ntimes, 5), np.complex)
    # Calculate resulting attenuation based on bit attn values (2,4,8,16)
    for i in range(4):
        # Need dcm_attn_bitv[...,i] to be same shape as dcm_attn
        bigger_bitv = np.broadcast_to(dcm_attn_bitv[..., i],
                                      (ntimes, 34, 16, 2))
        bigger_bitv = np.swapaxes(
            np.swapaxes(np.swapaxes(bigger_bitv, 0, 3), 1, 2), 0, 1)
        att[..., i] = (np.bitwise_and(dcm_attn, 2**(i + 1)) >>
                       (i + 1)) * bigger_bitv
        dcm_attn_corr = dcm_attn_corr + att[..., i]

    # Move ntimes column to next to last position, and then sum over last column (the two attenuators)
    fem_attn_corr = np.sum(np.rollaxis(fem_attn_corr, 0, 3), 3)
    # Output is FEM shape (nant, npol, ntimes) = (16, 2, ntimes)
    #           DCM shape (nant, npol, nband, ntimes) = (16, 2, 34, ntimes)
    # Arrays are complex, in dB units
    tjd = Time(fem_attn['timestamp'].astype('int'), format='lv').jd
    return fem_attn_corr, dcm_attn_corr, tjd
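
# Hedged usage sketch: gain state for the last 10 minutes (the default).
if __name__ == '__main__':
    fem_attn_corr, dcm_attn_corr, tjd = gain_state()
    print 'FEM attn shape:', fem_attn_corr.shape
    print 'DCM attn shape:', dcm_attn_corr.shape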
Example #18
def get_gain_state(trange, dt=None, relax=False):
    ''' Get all gain-state information for a given timerange.  Returns a dictionary
        with keys as follows:
        
        times:     A Time object containing the array of times, size (nt)
        h1:        The first HPol attenuator value for 15 antennas, size (nt, 15) 
        v1:        The first VPol attenuator value for 15 antennas, size (nt, 15) 
        h2:        The second HPol attenuator value for 15 antennas, size (nt, 15) 
        v2:        The second VPol attenuator value for 15 antennas, size (nt, 15)
        dcmattn:   The base DCM attenuations for nbands x 15 antennas x 2 Poln, size (34 or 52,30)
                      The order is Ant1 H, Ant1 V, Ant2 H, Ant2 V, etc.
        dcmoff:    If DPPoffset-on is 0, this is None (meaning there are no changes to the
                      above base attenuations).  
                   If DPPoffset-on is 1, then dcmoff is a table of offsets to the 
                      base attenuation, size (nt, 50).  The offset applies to all 
                      antennas/polarizations.
                      
        Optional keywords:
           dt      Seconds between entries to read from SQL stateframe database. 
                     If omitted, 1 s is assumed.
           relax   Used for gain of reference time, in case there are no SQL data for the
                     requested time.  In that case it finds the data for the nearest later time.
    '''
    if dt is None:
        tstart, tend = [str(i) for i in trange.lv]
    else:
        # Expand time by 1/2 of dt before and after
        tstart = str(np.round(trange[0].lv - dt / 2))
        tend = str(np.round(trange[1].lv + dt / 2))
    cursor = db.get_cursor()
    ver = db.find_table_version(cursor, trange[0].lv)
    # Get front end attenuator states
    # Attempt to solve the problem if there are no data
    if relax:
        # Special case of reference gain, where we want the first nt records after tstart, in case there
        # are no data at time tstart
        nt = int(float(tend) - float(tstart) - 1) * 15
        query = 'select top '+str(nt)+' Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second,' \
            +'Ante_Fron_FEM_VPol_Atte_First,Ante_Fron_FEM_VPol_Atte_Second,Ante_Fron_FEM_Clockms from fV' \
            +ver+'_vD15 where Timestamp >= '+tstart+' order by Timestamp'
    else:
        query = 'select Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second,' \
            +'Ante_Fron_FEM_VPol_Atte_First,Ante_Fron_FEM_VPol_Atte_Second,Ante_Fron_FEM_Clockms from fV' \
            +ver+'_vD15 where Timestamp >= '+tstart+' and Timestamp < '+tend+' order by Timestamp'
    #if dt:
    #    # If dt (seconds between measurements) is set, add appropriate SQL statement to query
    #    query += ' and (cast(Timestamp as bigint) % '+str(dt)+') = 0 '
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        if dt:
            # If we want other than full cadence, get new array shapes and times
            n = len(data['Timestamp'])  # Original number of times
            new_n = (
                n / 15 / dt
            ) * 15 * dt  # Truncated number of times equally divisible by dt
            new_shape = (n / 15 / dt, dt, 15)  # New shape of truncated arrays
            times = Time(data['Timestamp'][:new_n].astype('int')[::15 * dt],
                         format='lv')
        else:
            times = Time(data['Timestamp'].astype('int')[::15], format='lv')
        # Change tstart and tend to correspond to actual times from SQL
        tstart, tend = [str(i) for i in times[[0, -1]].lv]
        h1 = data['Ante_Fron_FEM_HPol_Atte_First']
        h2 = data['Ante_Fron_FEM_HPol_Atte_Second']
        v1 = data['Ante_Fron_FEM_VPol_Atte_First']
        v2 = data['Ante_Fron_FEM_VPol_Atte_Second']
        ms = data['Ante_Fron_FEM_Clockms']
        nt = len(h1) / 15
        h1.shape = (nt, 15)
        h2.shape = (nt, 15)
        v1.shape = (nt, 15)
        v2.shape = (nt, 15)
        ms.shape = (nt, 15)
        # Find any entries for which Clockms is zero, which indicates where no
        # gain-state measurement is available.
        for i in range(15):
            bad, = np.where(ms[:, i] == 0)
            if bad.size != 0 and bad.size != nt:
                # Find nearest adjacent good value
                good, = np.where(ms[:, i] != 0)
                idx = nearest_val_idx(bad, good)
                h1[bad, i] = h1[good[idx], i]
                h2[bad, i] = h2[good[idx], i]
                v1[bad, i] = v1[good[idx], i]
                v2[bad, i] = v2[good[idx], i]
        if dt:
            # If we want other than full cadence, find mean over dt measurements
            h1 = np.mean(h1[:new_n / 15].reshape(new_shape), 1)
            h2 = np.mean(h2[:new_n / 15].reshape(new_shape), 1)
            v1 = np.mean(v1[:new_n / 15].reshape(new_shape), 1)
            v2 = np.mean(v2[:new_n / 15].reshape(new_shape), 1)
        # Put results in canonical order [nant, nt]
        h1 = h1.T
        h2 = h2.T
        v1 = v1.T
        v2 = v2.T
    else:
        print 'Error reading FEM attenuations:', msg
        return {}
    # Get back end attenuator states
    xml, buf = ch.read_cal(2, t=trange[0])
    dcmattn = stf.extract(buf, xml['Attenuation'])
    nbands = dcmattn.shape[0]
    dcmattn.shape = (nbands, 15, 2)
    # Put into canonical order [nant, npol, nband]
    dcmattn = np.moveaxis(dcmattn, 0, 2)
    # See if DPP offset is enabled
    query = 'select Timestamp,DPPoffsetattn_on from fV' \
            +ver+'_vD1 where Timestamp >= '+tstart+' and Timestamp <= '+tend+' order by Timestamp'
    data, msg = db.do_query(cursor, query)
    if msg == 'Success':
        dppon = data['DPPoffsetattn_on']
        if np.where(dppon > 0)[0].size == 0:
            dcm_off = None
        else:
            query = 'select Timestamp,DCMoffset_attn from fV' \
                    +ver+'_vD50 where Timestamp >= '+tstart+' and Timestamp <= '+tend
            #if dt:
            #    # If dt (seconds between measurements) is set, add appropriate SQL statement to query
            #    query += ' and (cast(Timestamp as bigint) % '+str(dt)+') = 0 '
            query += ' order by Timestamp'
            data, msg = db.do_query(cursor, query)
            if msg == 'Success':
                otimes = Time(data['Timestamp'].astype('int')[::15],
                              format='lv')
                dcmoff = data['DCMoffset_attn']
                dcmoff.shape = (nt, 50)
                # We now have a time-history of offsets, at least some of which are non-zero.
                # Offsets by slot number do us no good, so we need to translate to band number.
                # Get fseqfile name at mean of timerange, from stateframe SQL database
                fseqfile = get_fseqfile(
                    Time(int(np.mean(trange.lv)), format='lv'))
                if fseqfile is None:
                    print 'Error: No active fseq file.'
                    dcm_off = None
                else:
                    # Get fseqfile from ACC and return bandlist
                    bandlist = fseqfile2bandlist(fseqfile)
                    nbands = len(bandlist)
                    # Use bandlist to convert nt x 50 array to nt x nbands array of DCM attn offsets
                    # Note that this assumes DCM offset is the same for any multiply-sampled bands
                    # in the sequence.
                    dcm_off = np.zeros((nt, nbands), float)
                    dcm_off[:, bandlist - 1] = dcmoff
                    # Put into canonical order [nband, nt]
                    dcm_off = dcm_off.T
                    if dt:
                        # If we want other than full cadence, find mean over dt measurements
                        new_nt = len(times)
                        dcm_off = dcm_off[:, :new_nt * dt]
                        dcm_off.shape = (nbands, dt, new_nt)
                        dcm_off = np.mean(dcm_off, 1)
            else:
                print 'Error reading DCM attenuations:', msg
                dcm_off = None
    else:
        print 'Error reading DPPon state:', msg
        dcm_off = None
    cursor.close()
    return {
        'times': times,
        'h1': h1,
        'v1': v1,
        'h2': h2,
        'v2': v2,
        'dcmattn': dcmattn,
        'dcmoff': dcm_off
    }
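
# Hedged usage sketch: full gain state at 10-s cadence for a one-hour range
# (assumes util.Time is importable).
if __name__ == '__main__':
    from util import Time
    gs = get_gain_state(Time(['2019-08-01 18:00', '2019-08-01 19:00']), dt=10)
    if gs:
        print 'h1 shape:', gs['h1'].shape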
Example #19
def flare_monitor(t):
    ''' Get all front-end power-detector voltages for the given day
        from the stateframe SQL database, and obtain the median of them, 
        to use as a flare monitor.
        
        Returns ut times in plot_date format and median voltages.
    '''
    import dbutil
    # timerange is 12 UT to 12 UT on next day, relative to the day in Time() object t
    trange = Time([int(t.mjd) + 12. / 24, int(t.mjd) + 36. / 24], format='mjd')
    tstart, tend = trange.lv.astype('str')
    cursor = dbutil.get_cursor()
    mjd = t.mjd
    verstr = dbutil.find_table_version(cursor, tstart)
    if verstr is None:
        print 'No stateframe table found for given time.'
        return tstart, [], {}
    query = 'select Timestamp,Ante_Fron_FEM_HPol_Voltage,Ante_Fron_FEM_VPol_Voltage from fV' + verstr + '_vD15 where timestamp between ' + tstart + ' and ' + tend + ' order by timestamp'
    data, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        print msg
        return tstart, [], {}
    for k, v in data.items():
        data[k].shape = (len(data[k]) / 15, 15)
    hv = []
    try:
        ut = Time(data['Timestamp'][:, 0].astype('float'),
                  format='lv').plot_date
    except:
        print 'Error for time', t.iso
        print 'Query:', query, ' returned msg:', msg
        print 'Keys:', data.keys()
        print data['Timestamp'][0, 0]
        return tstart, [], {}
    hfac = np.median(data['Ante_Fron_FEM_HPol_Voltage'].astype('float'), 0)
    vfac = np.median(data['Ante_Fron_FEM_VPol_Voltage'].astype('float'), 0)
    for i in range(4):
        if hfac[i] > 0:
            hv.append(data['Ante_Fron_FEM_HPol_Voltage'][:, i] / hfac[i])
        if vfac[i] > 0:
            hv.append(data['Ante_Fron_FEM_VPol_Voltage'][:, i] / vfac[i])
    flm = np.median(np.array(hv), 0)
    good = np.where(abs(flm[1:] - flm[:-1]) < 0.01)[0]

    # Get the project IDs for scans during the period
    verstrh = dbutil.find_table_version(cursor, trange[0].lv, True)
    if verstrh is None:
        print 'No scan_header table found for given time.'
        return ut[good], flm[good], {}
    query = 'select Timestamp,Project from hV' + verstrh + '_vD1 where Timestamp between ' + tstart + ' and ' + tend + ' order by Timestamp'
    projdict, msg = dbutil.do_query(cursor, query)
    if msg != 'Success':
        print msg
        return ut[good], flm[good], {}
    elif len(projdict) == 0:
        # No Project ID found, so return data and empty projdict dictionary
        print 'SQL Query was valid, but no Project data were found.'
        return ut[good], flm[good], {}
    projdict['Timestamp'] = projdict['Timestamp'].astype(
        'float')  # Convert timestamps from string to float
    for i in range(len(projdict['Project'])):
        projdict['Project'][i] = projdict['Project'][i].replace('\x00', '')

    # # Get the times when scanstate is -1
    # cursor.execute('select Timestamp,Sche_Data_ScanState from fV'+verstr+'_vD1 where Timestamp between '+tstart+' and '+tend+' and Sche_Data_ScanState = -1 order by Timestamp')
    # scan_off_times = np.transpose(np.array(cursor.fetchall()))[0]  # Just list of timestamps
    # if len(scan_off_times) > 2:
    #     gaps = scan_off_times[1:] - scan_off_times[:-1] - 1
    #     eos = np.where(gaps > 10)[0]
    #     if len(eos) > 1:
    #         if scan_off_times[eos[1]] < projdict['Timestamp'][0]:
    #             # Gaps are not lined up, so drop the first:
    #             eos = eos[1:]
    #     EOS = scan_off_times[eos]
    #     if scan_off_times[eos[0]] <= projdict['Timestamp'][0]:
    #         # First EOS is earlier than first Project ID, so make first Project ID None.
    #         projdict['Timestamp'] = np.append([scan_off_times[0]],projdict['Timestamp'])
    #         projdict['Project'] = np.append(['None'],projdict['Project'])
    #     if scan_off_times[eos[-1]+1] >= projdict['Timestamp'][-1]:
    #         # Last EOS is later than last Project ID, so make last Project ID None.
    #         projdict['Timestamp'] = np.append(projdict['Timestamp'],[scan_off_times[eos[-1]+1]])
    #         projdict['Project'] = np.append(projdict['Project'],['None'])
    #         EOS = np.append(EOS,[scan_off_times[eos[-1]+1],scan_off_times[-1]])
    #     projdict.update({'EOS': EOS})
    # else:
    #     # Not enough scan changes to determine EOS (end-of-scan) times
    #     projdict.update({'EOS': []})
    # This turns out to be a more rational, "good-enough" approach to the end-of-scan problem.
    # The last scan, though, will be ignored...
    projdict.update({'EOS': projdict['Timestamp'][1:]})
    projdict.update({'Timestamp': projdict['Timestamp'][:-1]})
    projdict.update({'Project': projdict['Project'][:-1]})
    cursor.close()
    return ut[good], flm[good], projdict
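# Illustrative sketch (not from the source) of the "good-enough" end-of-scan
# bookkeeping above: each scan's EOS is taken to be the next scan's start time,
# so the Timestamp/Project lists are trimmed by one and the last scan is dropped.
import numpy as np
ts = np.array([100., 250., 400.])  # hypothetical scan start times (LabVIEW seconds)
proj = np.array(['A', 'B', 'C'])
eos = ts[1:]  # scan i ends when scan i+1 begins
for start, end, name in zip(ts[:-1], eos, proj[:-1]):
    print name, 'runs from', start, 'to', end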
Example #20
0
def get_state_idx(trange, cycles=4, attenuator=1):
    ''' Create an array of shape (6, cycles) containing, for each of the 6
        attenuation states (0, 1, 2, 4, 8, 16 dB) and each of the (default 4)
        cycles, the time indices during which the attenuator is in that state.
    '''
    firstorsecond = ['First', 'Second']
    s = sp.Spectrogram(trange)
    s.docal = False
    s.dosub = False
    s.domedian = False
    cursor = dbutil.get_cursor()
    res, msg = dbutil.do_query(cursor,'select Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second from fV54_vD15 where Timestamp between '+str(trange[0].lv)+' and '+str(trange[1].lv))
    cursor.close()
    if msg == 'Success':
        antlist = []
        for i in [0, 1, 2, 4, 8, 16]:
            statelist = []
            for j in range(15):
                state, = np.where(np.logical_and(res['Ante_Fron_FEM_HPol_Atte_' + firstorsecond[attenuator]][j::15].astype('int') == i,res['Ante_Fron_FEM_HPol_Atte_' + firstorsecond[attenuator-1]][j::15].astype('int') != 0))
                statelist.append(state)
            statelist = np.array(statelist)
            antlist.append(statelist)
        states = np.array(antlist)
        states = np.rollaxis(states, 1)
        for i in range(15):
            for j in range(6):
                states[i, j] = res['Timestamp'][i::15][states[i, j]]
    else:
        print 'failure'
        return None
    time_array = (s.time.lv+0.001).astype('int')
    time_list = list(time_array)
    attns = ['0', '1', '2', '4', '8', '16']
    common_list = []
    for j in range(6):
        # Antenna 1 is used as the reference antenna here.
        #  Earlier versions kept only indices that were shared
        #  across attenuations AND antennas, but because of a
        #  small timing error that occurs between antennas
        #  during the scan itself, that older version would
        #  sometimes fail.
        i1, i2 = util.common_val_idx(time_array, states[0, j])
        if i1.shape == i2.shape:
            common_ant_list = i2
        else:
            print 'There is a problem with the reference antenna at attenuation '+attns[j]
        common_list.append(common_ant_list)
    
    final_indices = []
    final_indices1 = []
    for i in range(6):
        index_list = []
        for indxs in common_list[i]:
            try:
                index_list.append(time_list.index(states[0,i][indxs]))
            except:
                pass
        final_indices1.append(index_list)
    for i in range(6):
        indices_array = np.array(final_indices1[i])
        final_indices.append(indices_array)
    final_indices = np.array(final_indices) 

    rolled_indices = []
    for i in range(6):
        rolled = np.roll(final_indices[i], -1)
        rolled_indices.append(rolled)
    subtracted_list = []
    for j in range(6):
        subtracted_list.append(rolled_indices[j] - final_indices[j])
    break_lists = []
    for k in range(6):
        break_list = []
        for indx in range(subtracted_list[k].shape[0]):
            if np.absolute(subtracted_list[k][indx]) <= 2:
                break_list.append(indx)
            else: 
                break_list.append(-1)
        break_lists.append(break_list)
    for i in range(6):
        for indx in range(int(len(break_lists[i]))-1):
            try:
                if break_lists[i][indx] == break_lists[i][indx-1]:
                    break_lists[i].pop(indx)
            except:
                pass
    break_list = []
    for j in range(6):
        breaklist = np.array(break_lists[j])
        break_list.append(breaklist)
    break_spots = []
    for i in range(6):
        try:
            break_spot = []
            for indx in range(len(break_list[i])):
                if break_list[i][indx] == -1:
                    break_spot.append(indx)
            break_spots.append(break_spot)
        except:
            pass
    split_lists = []
    for k in range(6):
        steps_list = [break_list[k][0:break_spots[k][0]]]
        for j in range(cycles-1):
            try:
                steps_list.append(break_list[k][1 + break_spots[k][j]:break_spots[k][j+1]])
            except:
                pass            
        split_lists.append(steps_list)
    split_lists = np.array(split_lists)  
    final_grouped_indices = []
    for i in range(6):
        grouped_indices = []
        for j in range(cycles):
            try:
                indices_ = []
                for indxs in split_lists[i][j]:
                    indices_.append(rolled_indices[i][indxs])
                grouped_indices.append(indices_)
            except:
                pass
        final_grouped_indices.append(grouped_indices)
    final_grouped_indices = np.array(final_grouped_indices)
    for i in range(6):
        for j in range(cycles):
            try:
                for k in range(1,int(len(final_grouped_indices[i][j]))-1):
                    try:
                        for m in range(len(final_grouped_indices[i][j])):
                            if (final_grouped_indices[i][j][k-1] + 3) <= final_grouped_indices[i][j][k]:
                                final_grouped_indices[i][j].pop(k-1) 
                            if (final_grouped_indices[i][j][k+1]-3) >= final_grouped_indices[i][j][k]:
                                final_grouped_indices[i][j].pop(k+1)       
                    except:
                        pass
            except:
                pass
    return final_grouped_indices, res
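# Sketch (illustrative values, not from the source) of the np.roll differencing
# used above to split a monotonic index list into cycles: positions where the
# gap between consecutive indices exceeds a small threshold mark the breaks.
import numpy as np
idx = np.array([10, 11, 12, 40, 41, 42, 43, 90, 91])
jumps = np.roll(idx, -1) - idx  # last element wraps around, so ignore it below
breaks, = np.where(np.abs(jumps[:-1]) > 2)  # boundaries between index groups
groups = np.split(idx, breaks + 1)  # -> [10 11 12], [40 41 42 43], [90 91]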
Example #21
0
def attn_noises(trange_gaincal):
        ''' Used with "get_reverseattn". Finds and returns the background noise,
            the "res" dictionary from dbutil.do_query, and the "tsys" from
            spectrogram_fit's get_data(). The parameter should be a GAINCALTEST
            trange, which may be located with find_gaincal.

            PLEASE NOTE: ANY trange with data may be used as "trange_gaincal";
            use a trange from a GAINCALTEST if you want the noise to be
            calibrated from the GAINCALTEST file, which will most likely be
            more recent than the FEATTNTEST file it would otherwise take the
            noise from.
        '''
        s = sp.Spectrogram(trange_gaincal)
        s.docal = False
        s.dosub = False
        s.domedian = False
        tsys, std = s.get_data()
        trange_feattncal = find_gaincal()
        if type(trange_feattncal) == list:
            trange_feattncal = trange_feattncal[-1]
        else:
            pass
        trange_feattncal2 = find_gaincal(t = Time('2015-07-21 00:00'), scan_length=5, findwhat='FEATTNTEST2')
        if type(trange_feattncal2) == list:
            trange_feattncal2 = trange_feattncal2[-1]
        else:
            pass
        ratios, calfilenoise = show_dB_ratio(trange_feattncal)
        ratios1, calfilenoise1 = show_dB_ratio(trange_feattncal2, test='FEATTNTEST2')

        cursor = dbutil.get_cursor()
        res, msg = dbutil.do_query(cursor,'select Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second from fV54_vD15 where Timestamp between '+str(trange_gaincal[0].lv)+' and '+str(trange_gaincal[1].lv))
        cursor.close()

        idx1, idx2 = util.common_val_idx(res['Timestamp'][0::15].astype('int'), (s.time.lv+0.5).astype('int'))       
        idx3, idx4 = util.common_val_idx(res['Timestamp'].astype('int'), (s.time.lv+0.5).astype('int'))
        marker = -1
        while idx1[-1] > idx2[-1]:
            idx1 = np.delete(idx1, -1)
            marker += 1
        tsys = tsys[:, :, :, idx1]

        calfilenoise_ = []
        for ant in range(calfilenoise.shape[0]):
            calfilenoisepol = []
            for pol in range(calfilenoise.shape[1]):
                calfilenoisefreq = []
                for freq in range(calfilenoise.shape[2]):
                    calfilenoisefreq.append(np.average(calfilenoise[ant, pol, freq, :]))
                calfilenoisepol.append(calfilenoisefreq)
            calfilenoise_.append(calfilenoisepol)
        calfilenoise = np.array(calfilenoise_)

        noise_level = []
        for ant in range(tsys.shape[0]):
            pol_noise = []
            for pol in range(tsys.shape[1]):
                freq_noise = []
                state, = np.where(np.logical_and(res['Ante_Fron_FEM_HPol_Atte_Second'][ant::15].astype('int') == 31, res['Ante_Fron_FEM_HPol_Atte_First'][ant::15].astype('int') == 31))
                for freq in range(tsys.shape[2]):
                    avg_noise = []
                    for index in state:
                        try:
                            if np.logical_and(tsys[ant, pol, freq, index] <= 0.005, index < tsys.shape[3]):
                                avg_noise.append(tsys[ant, pol, freq, index])
                        except:
                            pass
                    freq_noise.append(np.average(avg_noise))
                pol_noise.append(freq_noise)
            noise_level.append(pol_noise)
        noise_level = np.array(noise_level)

        for ant in range(tsys.shape[0]):
            for pol in range(tsys.shape[1]):
                for freq in range(tsys.shape[2]):
                    if np.isnan(noise_level[ant, pol, freq]):
                        # Fall back to the FEATTNTEST-derived noise if no good samples were found
                        try:
                            noise_level[ant, pol, freq] = calfilenoise[ant, pol, freq]
                        except:
                            pass

        return tsys, res, noise_level, idx1, idx3, marker, trange_feattncal, trange_feattncal2
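# util.common_val_idx is used above to align SQL timestamps with spectrogram
# times. A minimal stand-in (an assumption about its behavior, for illustration
# only): return the indices, in each input array, of the values they share.
import numpy as np
def common_val_idx_sketch(a, b):
    # return_indices requires numpy >= 1.15
    common, ia, ib = np.intersect1d(a, b, return_indices=True)
    return ia, ib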
Example #22
0
def get_reverseattn(trange_gaincal, trange_other=None, first_attn_base=5, second_attn_base=3, corrected_attns=False):
        ''' Finds and returns the tsys, the noise-corrected tsys, and the noise-
            and attenuation-corrected tsys. The first parameter should be a
            GAINCALTEST trange, which may be located with find_gaincal. It may
            or may not take "trange_other": any file recorded by the system
            from the antennas may be used there, whether a flare or just
            quiet-sun data.

            PLEASE NOTE: ANY trange with data may be used as "trange_gaincal";
            use a trange from a GAINCALTEST and the other file as "trange_other"
            if you want the noise to be calibrated from the GAINCALTEST file,
            which will most likely be more recent than the FEATTNTEST file it
            would otherwise take the noise from.
        '''
        tsys1, res1, noise_level, idx1a, idx3a, marker1, trange_feattncal, trange_feattncal2  = attn_noises(trange_gaincal)
        if corrected_attns:
            all_attns_avg = get_all_attns(trange_feattncal, test='FEATTNTEST', from_corrected_attns=True)
            all_attns_avg1 = get_all_attns(trange_feattncal2, test='FEATTNTEST2', from_corrected_attns=True)
        else:
            all_attns_avg = get_all_avg_attns(trange_feattncal, test='FEATTNTEST')
            all_attns_avg1 = get_all_avg_attns(trange_feattncal2, test='FEATTNTEST2')
        
        if trange_other is None:
            tsys = tsys1
            res = res1
            idx1 = idx1a
            idx3 = idx3a
            marker = marker1
        else:
            s = sp.Spectrogram(trange_other)
            s.docal = False
            s.dosub = False
            s.domedian = False
            tsys, std = s.get_data()
            cursor = dbutil.get_cursor()
            res, msg = dbutil.do_query(cursor,'select Timestamp,Ante_Fron_FEM_HPol_Atte_First,Ante_Fron_FEM_HPol_Atte_Second from fV54_vD15 where Timestamp between '+str(trange_other[0].lv)+' and '+str(trange_other[1].lv))
            cursor.close()
            idx1, idx2 = util.common_val_idx(res['Timestamp'][0::15].astype('int'), (s.time.lv+0.5).astype('int'))
            marker = -1
            while idx1[-1] > idx2[-1]:
                idx1 = np.delete(idx1, -1)
                marker += 1
            tsys = tsys[:, :, :, idx1]
            idx3, idx4 = util.common_val_idx(res['Timestamp'].astype('int'), (s.time.lv+0.5).astype('int'))
            res['Timestamp'] = res['Timestamp'][idx3]

        if noise_level.shape[2] < tsys.shape[2]:
            freqs_for_range = noise_level.shape[2]
        else:
            freqs_for_range = tsys.shape[2]
                            
        tsys_noise_corrected = []
        for ant in range(tsys.shape[0]):
            pol_corrected = []
            for pol in range(tsys.shape[1]):
                freq_corrected = []
                for freq in range(freqs_for_range):
                    index_corrected = []
                    for index in range(tsys.shape[3]):
                        index_corrected.append(tsys[ant, pol, freq, index] - noise_level[ant, pol, freq])
                    freq_corrected.append(index_corrected)
                pol_corrected.append(freq_corrected)
            tsys_noise_corrected.append(pol_corrected)
        tsys_noise_corrected = np.array(tsys_noise_corrected)
        
        freqloopslist = [tsys_noise_corrected.shape[2], all_attns_avg.shape[3], all_attns_avg1.shape[3]]
        freqloops = min(freqloopslist)

        if tsys.shape[3] < len(res['Ante_Fron_FEM_HPol_Atte_Second'][0::15]):
            indexloops = tsys.shape[3]
        else:
            indexloops = len(res['Ante_Fron_FEM_HPol_Atte_Second'][0::15]) - 1
        if tsys.shape[3] < len(res['Ante_Fron_FEM_HPol_Atte_First'][0::15]):
            indexloops1 = tsys.shape[3]
        else:
            indexloops1 = len(res['Ante_Fron_FEM_HPol_Atte_First'][0::15]) - 1
        idxstart = marker + (15-8)
        xory = ['x', 'y']
        ant_postcorrected = []
        for ant in range(tsys.shape[0]):
            pol_postcorrected = []
            for pol in range(tsys.shape[1]):
                freq_postcorrected = []
                for freq in range(freqloops):
                    indices_postcorrected = []
                    for indx in range(indexloops):
                        testlevel = res['Ante_Fron_FEM_HPol_Atte_Second'][ant::15][indx+idxstart]
                        if not 0 <= testlevel <= 31:
                            print 'Problem with the attenuation of antenna ' + str(ant) + xory[pol] + ' at frequency channel ' + str(freq) + ' and time index ' + str(indx) + '. The attenuation is showing: ' + str(testlevel)
                            testlevel = 0
                        indices_postcorrected.append(10**((all_attns_avg[testlevel, ant, pol, freq]-all_attns_avg[second_attn_base, ant, pol, freq])/10)*tsys_noise_corrected[ant, pol, freq, indx])
                    indices_postcorrected1 = []
                    for indx in range(indexloops1):
                        testlevel = res['Ante_Fron_FEM_HPol_Atte_First'][ant::15][indx+idxstart]
                        if not 0 <= testlevel <= 31:
                            print 'Problem with the attenuation of antenna ' + str(ant) + xory[pol] + ' at frequency channel ' + str(freq) + ' and time index ' + str(indx) + '. The attenuation is showing: ' + str(testlevel)
                            testlevel = 0
                        indices_postcorrected1.append(10**((all_attns_avg1[testlevel, ant, pol, freq]-all_attns_avg1[first_attn_base, ant, pol, freq])/10)*indices_postcorrected[indx])
                    freq_postcorrected.append(indices_postcorrected1)
                pol_postcorrected.append(freq_postcorrected)
            ant_postcorrected.append(pol_postcorrected)
        tsys_attn_noise_corrected = np.array(ant_postcorrected)
        
        return tsys_attn_noise_corrected, tsys_noise_corrected, tsys
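# Worked arithmetic (illustrative) for the attenuation correction applied above:
# power measured at attenuator state "level" is referred back to a chosen base
# state via the measured dB difference, P_corr = 10**((A_level - A_base)/10) * P.
attn_level, attn_base = 8.2, 3.1  # hypothetical measured attenuations in dB
p_meas = 0.004  # measured power, arbitrary units
p_corr = 10**((attn_level - attn_base) / 10.) * p_meas
print p_corr  # ~0.0129, i.e. boosted by the 5.1 dB removed by the attenuator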
Example #23
0
from util import Time
import dbutil as db
import numpy as np
from pylab import subplots, suptitle, pi  # assumed: an interactive matplotlib/pylab setup for the plotting below

hcurve = []
vcurve = []
t = Time(['2015-10-28 6:17','2015-10-28 6:20','2015-10-28 6:23','2015-10-28 6:30','2015-10-28 6:33','2015-10-28 6:36','2015-10-28 6:39']).lv.astype('int')
cursor = db.get_cursor()
for lv in t:
    query = 'select Timestamp,Ante_Fron_FEM_HPol_Voltage from fv61_vD15 where (I15 % 15) = 13 and Timestamp between '+str(lv-90)+' and '+str(lv+90)+' order by Timestamp'
    hc, msg = db.do_query(cursor,query)
    hcurve.append(hc)
    query = 'select Timestamp,Ante_Fron_FEM_VPol_Voltage from fv61_vD15 where (I15 % 15) = 13 and Timestamp between '+str(lv-90)+' and '+str(lv+90)+' order by Timestamp'
    vc, msg = db.do_query(cursor,query)
    vcurve.append(vc)
cursor.close()

f,ax = subplots(2,1)
hlabel = ['175 mm','150 mm','125 mm','100 mm','75 mm','50 mm','25 mm']
for i,h in enumerate(hcurve):
    x = (h['Timestamp']-t[i])*np.cos(13*pi/180)
    ax[0].plot(x,h['Ante_Fron_FEM_HPol_Voltage'],label=hlabel[i])
for i,v in enumerate(vcurve):
    x = (v['Timestamp']-t[i])*np.cos(13*pi/180)
    ax[1].plot(x,v['Ante_Fron_FEM_VPol_Voltage'],label=hlabel[i])
ax[0].set_ylim(1.2,2.2)
ax[0].legend(fontsize=10)
ax[0].set_xlim(-50,50)
ax[1].set_ylim(1.2,2.2)
ax[1].set_xlim(-50,50)
ax[1].legend(fontsize=10)
suptitle('27-m Drift Scans on Moon vs. Focus',fontsize=18)
ax[0].set_ylabel('HPol Voltage')
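# Sketch of the x-axis conversion in the plot loop above (hypothetical values):
# time offsets from each scan center are scaled by cos(declination), here
# presumably the source declination of 13 degrees, so that equal x intervals
# correspond to equal drift angles on the sky.
import numpy as np
t0 = 1446012000  # hypothetical scan-center LabVIEW time [s]
tstamps = t0 + np.arange(-90, 91, 10)  # samples within +/-90 s of center
x = (tstamps - t0) * np.cos(13 * np.pi / 180.)  # cross-scan coordinate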
Example #24
0
def tp_bgnd(tpdata):
    ''' Create time-variable background from ROACH inlet temperature
        This version is far superior to the earlier, crude version, but
        beware that it works best for a long timerange of data, especially
        when there is a flare in the data.
        
        Inputs:
          tpdata   dictionary returned by read_idb()  NB: tpdata is not changed.
          
        Returns:
          bgnd     The background fluctuation array of size (nf,nt) to be 
                     subtracted from any antenna's total power (or mean of
                     antenna total powers)
    '''
    import dbutil as db
    from util import Time, nearest_val_idx
    # Note: smooth() used below (windowed smoothing over wlen samples) is
    # assumed to be imported at module level.
    outfghz = tpdata['fghz']
    try:
        outtime = tpdata['time']
        trange = Time(outtime[[0, -1]], format='jd')
    except:
        outtime = tpdata['ut_mjd']
        trange = Time(outtime[[0, -1]], format='mjd')

    tstr = trange.lv.astype(int).astype(str)
    nt = len(outtime)
    if nt < 1200:
        print 'TP_BGND: Error, timebase too small.  Must have at least 1200 time samples.'
        return None
    nf = len(outfghz)
    outpd = Time(outtime, format='jd').plot_date
    cursor = db.get_cursor()
    version = db.find_table_version(cursor, int(tstr[0]))
    query = 'select * from fV' + version + '_vD8 where (Timestamp between ' + tstr[0] + ' and ' + tstr[1] + ')'
    data, msg = db.do_query(cursor, query)
    pd = Time(data['Timestamp'][::8].astype(int), format='lv').plot_date
    inlet = data['Sche_Data_Roac_TempInlet'].reshape(len(pd), 8)  # Inlet temperature variation
    sinlet = np.sum(inlet.astype(float), 1)
    # Eliminate 0 values in sinlet by replacing with nearest good value
    bad, = np.where(sinlet == 0)
    good, = np.where(sinlet != 0)
    idx = nearest_val_idx(bad, good)  # Find locations of nearest good values to bad ones
    sinlet[bad] = sinlet[good[idx]]  # Overwrite bad values with good ones
    sinlet -= np.mean(sinlet)  # Remove offset, to provide zero-mean fluctuation
    sinlet = np.roll(sinlet, -110)  # Shift phase of variation by 110 s earlier (seems to be needed)
    # Interpolate sinlet values to the times in the data
    sint = np.interp(outpd, pd, sinlet)
    sdev = np.std(sint)
    sint_ok = np.abs(sint) < 2 * sdev
    bgnd = np.zeros((nf, nt), float)
    for i in range(nf):
        wlen = min(nt, 2000)
        if wlen % 2 != 0:
            wlen -= 1
        # Subtract smooth trend from data
        sig = tpdata['p'][i] - smooth(tpdata['p'][i], wlen, 'blackman')[wlen / 2:-(wlen / 2 - 1)]
        # Eliminate the worst outliers and repeat
        stdev = np.nanstd(sig)
        good, = np.where(np.abs(sig) < stdev)
        if len(good) > nt * 0.1:
            wlen = min(len(good), 2000)
            if wlen % 2 != 0:
                wlen -= 1
            # Subtract smooth trend from data
            sig = tpdata['p'][i, good] - smooth(tpdata['p'][i, good], wlen, 'blackman')[wlen / 2:-(wlen / 2 - 1)]
            sint_i = sint[good]
            stdev = np.std(sig)
            # Final check for data quality
            good, = np.where(np.logical_and(sig < 2 * stdev, sint_ok[good]))
            if len(good) > nt * 0.1:
                p = np.polyfit(sint_i[good], sig[good], 1)
            else:
                p = [1., 0.]
            # Apply correction for this frequency
            bgnd[i] = sint * p[0] + p[1]
    return bgnd
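# Self-contained sketch (synthetic data, for illustration) of the per-frequency
# regression at the heart of tp_bgnd: the zero-mean inlet-temperature proxy is
# fit against the detrended total power, and the fitted line is the background.
import numpy as np
nt = 3000
sint = np.sin(np.arange(nt) / 300.)  # stand-in for the interpolated inlet proxy
sig = 0.7 * sint + 0.05 * np.random.randn(nt)  # detrended power, proxy-correlated
p = np.polyfit(sint, sig, 1)  # least-squares slope and intercept
bgnd_i = sint * p[0] + p[1]  # background fluctuation for this channel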