Example #1
def apply_fem_level(data, gctime=None):
    ''' Applies the FEM level corrections to the given data dictionary.
        
        Inputs:
          data     A dictionary such as that returned by read_idb().
          gctime   A Time() object whose date specifies which GAINCALTEST
                     measurements to use.  If omitted, the date of the data
                     is used.

        Output:
          cdata    A dictionary with the level-corrected data.  The keys
                     p, x, p2, and a are all updated.
    '''
    from util import common_val_idx, nearest_val_idx
    import attncal as ac
    from gaincal2 import get_fem_level
    import copy

    # Get timerange from data
    trange = Time([data['time'][0], data['time'][-1]], format='jd')
    if gctime is None:
        gctime = trange[0]
    # Get time cadence
    dt = np.int(
        np.round(np.median(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1: dt = None
    # Get the FEM levels of the requested timerange
    src_lev = get_fem_level(trange,
                            dt)  # solar gain state for timerange of file
    nf = len(data['fghz'])
    nt = len(src_lev['times'])
    # First attempt to read from the SQL database.  If that fails, read from the IDB file itself
    try:
        attn = ac.read_attncal(gctime)[0]  # Attn from SQL
    except:
        attn = ac.get_attncal(
            gctime
        )[0]  # Attn measured by GAINCALTEST (returns a list, but use first, generally only, one)
    antgain = np.zeros((15, 2, nf, nt),
                       np.float32)  # Antenna-based gains [dB] vs. frequency
    # Find common frequencies of attn with data
    idx1, idx2 = common_val_idx(data['fghz'], attn['fghz'], precision=4)
    a = attn['attn']
    for i in range(13):
        for k, j in enumerate(idx1):
            antgain[i, 0, j] = a[src_lev['hlev'][i], i, 0, idx2[k]]
            antgain[i, 1, j] = a[src_lev['vlev'][i], i, 0, idx2[k]]
    cdata = copy.deepcopy(data)
    blgain = np.zeros((120, 4, nf, nt),
                      float)  # Baseline-based gains vs. frequency
    for i in range(14):
        for j in range(i + 1, 15):
            blgain[ri.bl2ord[i, j],
                   0] = 10**((antgain[i, 0] + antgain[j, 0]) / 20.)
            blgain[ri.bl2ord[i, j],
                   1] = 10**((antgain[i, 1] + antgain[j, 1]) / 20.)
            blgain[ri.bl2ord[i, j],
                   2] = 10**((antgain[i, 0] + antgain[j, 1]) / 20.)
            blgain[ri.bl2ord[i, j],
                   3] = 10**((antgain[i, 1] + antgain[j, 0]) / 20.)
    antgainf = 10**(antgain / 10.)

    #idx1, idx2 = common_val_idx(data['time'],src_gs['times'].jd)
    idx = nearest_val_idx(data['time'], src_lev['times'].jd)
    # Apply corrections (some times may be eliminated from the data)
    # Correct the cross-correlation data
    cdata['x'] *= blgain[:, :, :, idx]
    # Correct the power
    cdata['p'][:15] *= antgainf[:, :, :, idx]
    # Correct the autocorrelation
    cdata['a'][:15, :2] *= antgainf[:, :, :, idx]
    cross_fac = np.sqrt(antgainf[:, 0] * antgainf[:, 1])
    cdata['a'][:15, 2] *= cross_fac[:, :, idx]
    cdata['a'][:15, 3] *= cross_fac[:, :, idx]
    # Correct the power-squared -- this should preserve SK
    cdata['p2'][:15] *= antgainf[:, :, :, idx]**2
    # Remove any uncorrected times before returning
    #cdata['time'] = cdata['time'][idx1]
    #cdata['p'] = cdata['p'][:,:,:,idx1]
    #cdata['a'] = cdata['a'][:,:,:,idx1]
    #cdata['p2'] = cdata['p2'][:,:,:,idx1]
    #cdata['ha'] = cdata['ha'][idx1]
    #cdata['m'] = cdata['m'][:,:,:,idx1]
    return cdata
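Note on the gain conversion above (not part of the original source): antgain holds antenna-based gains in dB, so the baseline (cross-correlation) factor uses half the summed dB in the exponent, giving an amplitude factor, while the single-antenna power factor uses the full dB value. A minimal numpy sketch with made-up values:

import numpy as np

antgain_db = np.array([2.0, 4.0, 6.0])                # hypothetical per-antenna gains [dB]
power_fac = 10**(antgain_db / 10.)                    # power (autocorrelation) factor, as in antgainf
i, j = 0, 1
bl_fac = 10**((antgain_db[i] + antgain_db[j]) / 20.)  # baseline amplitude factor, as in blgain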
Example #2
def apply_fem_level(data, gctime=None):
    ''' Applies the FEM level corrections to the given data dictionary.
        
        Inputs:
          data     A dictionary such as that returned by read_idb().
          gctime   A Time() object whose date specifies which GAINCALTEST
                     measurements to use.  If omitted, the date of the data
                     is used.

        Output:
          cdata    A dictionary with the level-corrected data.  The keys
                     x, px, and py are updated.
    '''
    from util import common_val_idx, nearest_val_idx, bl2ord
    import attncal as ac
    from gaincal2 import get_fem_level
    import copy

    # Get timerange from data
    trange = Time([data['time'][0], data['time'][-1]], format='jd')
    if gctime is None:
        gctime = trange[0]
    # Get time cadence
    dt = np.int(
        np.round(np.median(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1: dt = None
    # Get the FEM levels of the requested timerange
    src_lev = get_fem_level(trange,
                            dt)  # solar gain state for timerange of file
    nf = len(data['fghz'])
    nt = len(src_lev['times'])
    attn = ac.read_attncal(
        gctime
    )[0]  # Reads attn from SQL database (returns a list, but use first, generally only, one)
    # attn = ac.get_attncal(gctime)[0]   # Analyzes GAINCALTEST (returns a list, but use first, generally only, one)
    antgain = np.zeros((15, 2, nf, nt),
                       np.float32)  # Antenna-based gains [dB] vs. frequency
    # Find common frequencies of attn with data
    idx1, idx2 = common_val_idx(data['fghz'], attn['fghz'], precision=4)
    # Currently, GAINCALTEST measures 8 levels of attenuation (16 dB).  I assumed this would be enough,
    # but the flare of 2017-09-10 actually went to 10 levels (20 dB), so we have no choice but to extend
    # to higher levels using only the nominal, 2 dB steps above the 8th level.  This part of the code
    # extends to the maximum 14 levels.
    a = np.zeros((14, 13, 2, nf), float)  # Extend attenuation to 14 levels
    a[:8, :, :, idx1] = attn[
        'attn'][:, :13, :,
                idx2]  # Use GAINCALTEST results in the first 8 levels
    for i in range(7, 13):
        # Extend to levels 9-14 by adding 2 dB to each previous level
        a[i + 1] = a[i] + 2.
    for i in range(13):
        for k, j in enumerate(idx1):
            antgain[i, 0, j] = a[src_lev['hlev'][i], i, 0, j]
            antgain[i, 1, j] = a[src_lev['vlev'][i], i, 0, j]
    cdata = copy.deepcopy(data)
    nblant = 136
    blgain = np.zeros((nf, nblant, 4, nt),
                      float)  # Baseline-based gains vs. frequency

    for i in range(14):
        for j in range(i, 14):
            k = bl2ord[i, j]
            blgain[:, k, 0] = 10**((antgain[i, 0] + antgain[j, 0]) / 20.)
            blgain[:, k, 1] = 10**((antgain[i, 1] + antgain[j, 1]) / 20.)
            blgain[:, k, 2] = 10**((antgain[i, 0] + antgain[j, 1]) / 20.)
            blgain[:, k, 3] = 10**((antgain[i, 1] + antgain[j, 0]) / 20.)
    # Reorder antgain axes to put frequencies in first slot, to match data
    antgain = np.swapaxes(np.swapaxes(antgain, 1, 2), 0, 1)
    antgainf = 10**(antgain / 10.)

    idx = nearest_val_idx(data['time'], src_lev['times'].jd)
    nt = len(idx)  # New number of times
    # Correct the auto- and cross-correlation data
    cdata['x'] *= blgain[:, :, :, idx]
    # Reshape px and py arrays
    cdata['px'].shape = (nf, 16, 3, nt)
    cdata['py'].shape = (nf, 16, 3, nt)
    # Correct the power
    cdata['px'][:, :15, 0] *= antgainf[:, :, 0, idx]
    cdata['py'][:, :15, 0] *= antgainf[:, :, 1, idx]
    # Correct the power-squared
    cdata['px'][:, :15, 1] *= antgainf[:, :, 0, idx]**2
    cdata['py'][:, :15, 1] *= antgainf[:, :, 1, idx]**2
    # Reshape px and py arrays back to original
    cdata['px'].shape = (nf * 16 * 3, nt)
    cdata['py'].shape = (nf * 16 * 3, nt)
    return cdata
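The time matching above relies on nearest_val_idx() from util, which is not shown in this listing. An illustrative stand-in for what that matching does (not the actual util implementation) is:

import numpy as np

def nearest_index(values, targets):
    # For each value, return the index of the nearest element of targets.
    targets = np.asarray(targets)
    return np.array([np.abs(targets - v).argmin() for v in values])

data_times = np.array([0.0, 1.2, 2.9])      # hypothetical times [days]
lev_times = np.array([0.0, 1.0, 2.0, 3.0])  # hypothetical FEM-level time stamps
idx = nearest_index(data_times, lev_times)  # -> array([0, 1, 3])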
Example #3
def apply_fem_level(data, skycal=None, gctime=None):
    ''' Applies the FEM level corrections to the given data dictionary.
        
        Inputs:
          data     A dictionary such as that returned by read_idb().
          skycal   A dictionary returned by skycal_anal() in calibration.py.  This is
                     used to subtract a small "receiver background" before scaling for
                     the FEM level, and then add it back.
          gctime   A Time() object whose date specifies which GAINCALTEST
                     measurements to use.  If omitted, the date of the data
                     is used.

        Output:
          cdata    A dictionary with the level-corrected data.  The keys
                     p, x, p2, and a are all updated.
    '''
    from util import common_val_idx, nearest_val_idx
    import attncal as ac
    from gaincal2 import get_fem_level
    import copy

    # Get timerange from data
    trange = Time([data['time'][0], data['time'][-1]], format='jd')
    if gctime is None:
        gctime = trange[0]
    # Get time cadence
    dt = np.int(
        np.round(np.median(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1: dt = None
    # Get the FEM levels of the requested timerange
    src_lev = get_fem_level(trange,
                            dt)  # solar gain state for timerange of file
    nf = len(data['fghz'])
    nt = len(src_lev['times'])
    # First attempt to read from the SQL database.  If that fails, read from the IDB file itself
    try:
        attn = ac.read_attncal(gctime)[0]  # Attn from SQL
        if (gctime.mjd - attn['time'].mjd) > 1:
            # SQL entry is too old, so analyze the GAINCALTEST
            attn = ac.get_attncal(
                gctime
            )[0]  # Attn measured by GAINCALTEST (returns a list, but use first, generally only, one)
            ch.fem_attn_val2sql([attn])  # Go ahead and write it to SQL
    except:
        attn = ac.get_attncal(
            gctime
        )[0]  # Attn measured by GAINCALTEST (returns a list, but use first, generally only, one)
    antgain = np.zeros((15, 2, nf, nt),
                       np.float32)  # Antenna-based gains [dB] vs. frequency
    # Find common frequencies of attn with data
    idx1, idx2 = common_val_idx(data['fghz'], attn['fghz'], precision=4)
    # Currently, GAINCALTEST measures 8 levels of attenuation (16 dB).  I assumed this would be enough,
    # but the flare of 2017-09-10 actually went to 10 levels (20 dB), so we have no choice but to extend
    # to higher levels using only the nominal, 2 dB steps above the 8th level.  This part of the code
    # extends to the maximum 16 levels.
    a = np.zeros((16, 13, 2, nf), float)  # Extend attenuation to 16 levels
    a[1:9, :, :, idx1] = attn[
        'attn'][:, :13, :,
                idx2]  # Use GAINCALTEST results in levels 1-8 (bottom level is 0 dB)
    for i in range(8, 15):
        # Extend to levels 9-15 by adding 2 dB to each previous level
        a[i + 1] = a[i] + 2.
    a[15] = 62.  # Level 15 means 62 dB have been inserted.
    if dt:
        # For this case, src_lev is an array of dictionaries where keys are levels and
        # values are the proportion of that level for the given integration
        for i in range(13):
            for k, j in enumerate(idx1):
                for m in range(nt):
                    for lev, prop in src_lev['hlev'][i, m].items():
                        antgain[i, 0, j, m] += prop * a[lev, i, 0, idx2[k]]
                    for lev, prop in src_lev['vlev'][i, m].items():
                        antgain[i, 1, j, m] += prop * a[lev, i, 1, idx2[k]]
    else:
        # For this case, src_lev is just an array of levels
        for i in range(13):
            for k, j in enumerate(idx1):
                antgain[i, 0, j] = a[src_lev['hlev'][i], i, 0, idx2[k]]
                antgain[i, 1, j] = a[src_lev['vlev'][i], i, 1, idx2[k]]
    cdata = copy.deepcopy(data)
    blgain = np.zeros((120, 4, nf, nt),
                      float)  # Baseline-based gains vs. frequency
    for i in range(14):
        for j in range(i + 1, 15):
            blgain[ri.bl2ord[i, j],
                   0] = 10**((antgain[i, 0] + antgain[j, 0]) / 20.)
            blgain[ri.bl2ord[i, j],
                   1] = 10**((antgain[i, 1] + antgain[j, 1]) / 20.)
            blgain[ri.bl2ord[i, j],
                   2] = 10**((antgain[i, 0] + antgain[j, 1]) / 20.)
            blgain[ri.bl2ord[i, j],
                   3] = 10**((antgain[i, 1] + antgain[j, 0]) / 20.)
    antgainf = 10**(antgain / 10.)

    #idx1, idx2 = common_val_idx(data['time'],src_gs['times'].jd)
    idx = nearest_val_idx(data['time'], src_lev['times'].jd)
    # Apply corrections (some times may be eliminated from the data)
    # Correct the cross-correlation data
    cdata['x'] *= blgain[:, :, :, idx]
    # If a skycal dictionary exists, subtract receiver noise before scaling
    # NB: This will break SK!
    if skycal:
        sna, snp, snf = skycal['rcvr_bgd'].shape
        bgd = skycal['rcvr_bgd'].repeat(nt).reshape((sna, snp, snf, nt))
        bgd_auto = skycal['rcvr_bgd_auto'].repeat(nt).reshape(
            (sna, snp, snf, nt))
        cdata['p'][:13] -= bgd[:, :, :, idx]
        cdata['a'][:13, :2] -= bgd_auto[:, :, :, idx]
    # Correct the power,
    cdata['p'][:15] *= antgainf[:, :, :, idx]
    # Correct the autocorrelation
    cdata['a'][:15, :2] *= antgainf[:, :, :, idx]
    # If a skycal dictionary exists, add back the receiver noise
    #if skycal:
    #    cdata['p'][:13] += bgd[:,:,:,idx]
    #    cdata['a'][:13,:2] += bgd_auto[:,:,:,idx]
    cross_fac = np.sqrt(antgainf[:, 0] * antgainf[:, 1])
    cdata['a'][:15, 2] *= cross_fac[:, :, idx]
    cdata['a'][:15, 3] *= cross_fac[:, :, idx]
    # Correct the power-squared -- this should preserve SK
    cdata['p2'][:15] *= antgainf[:, :, :, idx]**2
    # Remove any uncorrected times before returning
    #cdata['time'] = cdata['time'][idx1]
    #cdata['p'] = cdata['p'][:,:,:,idx1]
    #cdata['a'] = cdata['a'][:,:,:,idx1]
    #cdata['p2'] = cdata['p2'][:,:,:,idx1]
    #cdata['ha'] = cdata['ha'][idx1]
    #cdata['m'] = cdata['m'][:,:,:,idx1]
    return cdata
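In the dt branch above, one integration can span more than one FEM level, so src_lev stores a {level: proportion} dictionary per integration and the effective attenuation is the proportion-weighted sum over those levels. A minimal sketch with made-up numbers:

attn_db = [2.0 * n for n in range(16)]   # hypothetical attenuation per level [dB]
lev_mix = {3: 0.25, 4: 0.75}             # 25% of the integration at level 3, 75% at level 4
eff_attn = sum(prop * attn_db[lev] for lev, prop in lev_mix.items())  # 7.5 dB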
Example #4
def autocorrect(out, ant_str='ant1-13'):
    nt = len(out['time'])
    nf = len(out['fghz'])
    pfac1 = (out['p'][:, :, :, :-1] -
             out['p'][:, :, :, 1:]) / out['p'][:, :, :, :-1]
    trange = Time(out['time'][[0, -1]], format='jd')
    src_lev = gc.get_fem_level(trange)  # Read FEM levels from SQL
    # Match times with data
    tidx = nearest_val_idx(out['time'], src_lev['times'].jd)
    # Find attenuation changes
    for ant in range(13):
        for pol in range(2):
            if pol == 0:
                lev = src_lev['hlev'][ant, tidx]
            else:
                lev = src_lev['vlev'][ant, tidx]
            jidx, = np.where(abs(lev[:-1] - lev[1:]) == 1)
            for freq in range(nf):
                idx, = np.where(
                    np.logical_and(
                        abs(pfac1[ant, pol, freq]) > 0.05,
                        abs(pfac1[ant, pol, freq]) < 0.95))
                for i in range(len(idx)):
                    if idx[i] in jidx or idx[i] in jidx - 1:
                        out['p'][ant, pol, freq, idx[i] +
                                 1:] /= (1 - pfac1[ant, pol, freq, idx[i]])
    calfac = pc.get_calfac(trange[0])
    tpcalfac = calfac['tpcalfac']
    tpoffsun = calfac['tpoffsun']
    hlev = src_lev['hlev'][:13, 0]
    vlev = src_lev['vlev'][:13, 0]
    attn_dict = ac.read_attncal(trange[0])[0]  # Read GCAL attn from SQL
    attn = np.zeros((13, 2, nf))
    for i in range(13):
        attn[i, 0] = attn_dict['attn'][hlev[i], 0, 0]
        attn[i, 1] = attn_dict['attn'][vlev[i], 0, 1]
        print 'Ant', i + 1, attn[i, 0, 20], attn[i, 1, 20]
    attnfac = 10**(attn / 10.)
    for i in range(13):
        print attnfac[i, 0, 20], attnfac[i, 1, 20]
    for i in range(nt):
        out['p'][:13, :, :,
                 i] = (out['p'][:13, :, :, i] * attnfac - tpoffsun) * tpcalfac
    antlist = ant_str2list(ant_str)
    med = np.mean(np.median(out['p'][antlist], 0), 0)
    bg = np.median(med[:, 0:300], 1).repeat(nt).reshape(nf, nt)
    med -= bg
    pdata = np.log10(med)
    f, ax = plt.subplots(1, 1)
    vmax = np.median(np.nanmax(pdata, 1))
    im = ax.pcolormesh(Time(out['time'], format='jd').plot_date,
                       out['fghz'],
                       pdata,
                       vmin=1,
                       vmax=vmax)
    plt.colorbar(im, ax=ax, label='Log Flux Density [sfu]')
    ax.xaxis_date()
    ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
    ax.set_ylim(out['fghz'][0], out['fghz'][-1])
    ax.set_xlabel('Time [UT]')
    ax.set_ylabel('Frequency [GHz]')
    return out, med
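The loop over pfac1 above removes power steps that coincide with a one-step FEM level change. A self-contained sketch of that idea on a toy series (all names and values here are illustrative):

import numpy as np

p = np.array([10., 10., 5., 5., 5.])                # power with a step at sample 2
lev = np.array([1, 1, 2, 2, 2])                     # FEM level changes by 1 at the same sample
pfac = (p[:-1] - p[1:]) / p[:-1]                    # fractional change between adjacent samples
jumps, = np.where(np.abs(lev[:-1] - lev[1:]) == 1)  # samples where the level stepped by one
for i in jumps:
    if 0.05 < abs(pfac[i]) < 0.95:
        p[i + 1:] /= (1 - pfac[i])                  # p is now flat at 10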
Example #5
def apply_fem_level(data, gctime=None, skycal=None):
    ''' Applies the FEM level corrections to the given data dictionary.
        
        Inputs:
          data     A dictionary such as that returned by readXdata().
          gctime   A Time() object whose date specifies which GAINCALTEST
                     measurements to use.  If omitted, the date of the data
                     is used.
          skycal   Optional dictionary of receiver noise from SKYCAL or GAINCAL
                     calibration.  Only the receiver noise is applied (subtracted).

        Output:
          cdata    A dictionary with the level-corrected data.  The keys
                     x, px, and py are updated.
    '''
    from util import common_val_idx, nearest_val_idx, bl2ord
    import attncal as ac
    from gaincal2 import get_fem_level
    import copy

    # Get timerange from data
    trange = Time([data['time'][0], data['time'][-1]], format='jd')
    if gctime is None:
        gctime = trange[0]
    # Get time cadence
    dt = np.int(
        np.round(np.nanmedian(data['time'][1:] - data['time'][:-1]) * 86400))
    if dt == 1: dt = None
    # Get the FEM levels of the requested timerange
    src_lev = get_fem_level(trange,
                            dt)  # solar gain state for timerange of file
    nf = len(data['fghz'])
    nt = len(src_lev['times'])
    attn = ac.read_attncal(
        gctime
    )[0]  # Reads attn from SQL database (returns a list, but use first, generally only, one)
    # attn = ac.get_attncal(gctime)[0]   # Analyzes GAINCALTEST (returns a list, but use first, generally only, one)
    antgain = np.zeros((15, 2, nf, nt),
                       np.float32)  # Antenna-based gains [dB] vs. frequency
    # Find common frequencies of attn with data
    idx1, idx2 = common_val_idx(data['fghz'], attn['fghz'], precision=4)
    # Currently, GAINCALTEST measures 8 levels of attenuation (16 dB).  I assumed this would be enough,
    # but the flare of 2017-09-10 actually went to 10 levels (20 dB), so we have no choice but to extend
    # to higher levels using only the nominal, 2 dB steps above the 8th level.  This part of the code
    # extends to the maximum 16 levels.
    a = np.zeros((16, 13, 2, nf), float)  # Extend attenuation to 16 levels
    a[1:9, :, :, idx1] = attn[
        'attn'][:, :13, :,
                idx2]  # Use GAINCALTEST results in levels 1-8 (bottom level is 0 dB)
    for i in range(8, 15):
        # Extend to levels 9-15 by adding 2 dB to each previous level
        a[i + 1] = a[i] + 2.
    a[15] = 62.  # Level 15 means 62 dB have been inserted.
    #print 'Attn list (dB) for ant 1, pol xx, lowest frequency:',a[:,0,0,0]
    if dt:
        # For this case, src_lev is an array of dictionaries where keys are levels and
        # values are the proportion of that level for the given integration
        for i in range(13):
            for k, j in enumerate(idx1):
                for m in range(nt):
                    for lev, prop in src_lev['hlev'][i, m].items():
                        antgain[i, 0, j, m] += prop * a[lev, i, 0, idx2[k]]
                    for lev, prop in src_lev['vlev'][i, m].items():
                        antgain[i, 1, j, m] += prop * a[lev, i, 1, idx2[k]]
    else:
        # For this case, src_lev is just an array of levels
        for i in range(13):
            for k, j in enumerate(idx1):
                antgain[i, 0, j] = a[src_lev['hlev'][i], i, 0, idx2[k]]
                antgain[i, 1, j] = a[src_lev['vlev'][i], i, 1, idx2[k]]
    cdata = copy.deepcopy(data)
    nblant = 136
    blgain = np.zeros((nf, nblant, 4, nt),
                      float)  # Baseline-based gains vs. frequency

    for i in range(14):
        for j in range(i, 14):
            k = bl2ord[i, j]
            blgain[:, k, 0] = 10**((antgain[i, 0] + antgain[j, 0]) / 20.)
            blgain[:, k, 1] = 10**((antgain[i, 1] + antgain[j, 1]) / 20.)
            blgain[:, k, 2] = 10**((antgain[i, 0] + antgain[j, 1]) / 20.)
            blgain[:, k, 3] = 10**((antgain[i, 1] + antgain[j, 0]) / 20.)
    # Reorder antgain axes to put frequencies in first slot, to match data
    antgain = np.swapaxes(np.swapaxes(antgain, 1, 2), 0, 1)
    antgainf = 10**(antgain / 10.)

    idx = nearest_val_idx(data['time'], src_lev['times'].jd)
    nt = len(idx)  # New number of times
    # If a skycal dictionary exists, subtract auto-correlation receiver noise before scaling (clip to 0)
    if skycal:
        sna, snp, snf = skycal['rcvr_bgd_auto'].shape
        bgd = skycal['rcvr_bgd_auto'].repeat(nt).reshape((sna, snp, snf, nt))
        bgd = bgd[:, :, idx2]  # Extract only frequencies matching the data
        # Reorder axes
        bgd = np.swapaxes(bgd, 0, 2)
        #        bslice = bgd[:,:,:,idx]
        for i in range(13):
            cdata['x'][:, bl2ord[i, i], 0] = np.clip(
                cdata['x'][:, bl2ord[i, i], 0] - bgd[:, 0, i], 0,
                None)  #bslice[:,0,i],0,None)
            cdata['x'][:, bl2ord[i, i], 1] = np.clip(
                cdata['x'][:, bl2ord[i, i], 1] - bgd[:, 1, i], 0,
                None)  #bslice[:,1,i],0,None)
    # Correct the auto- and cross-correlation data
    cdata['x'] *= blgain[:, :, :, idx]
    # Reshape px and py arrays
    cdata['px'].shape = (nf, 16, 3, nt)
    cdata['py'].shape = (nf, 16, 3, nt)
    # If a skycal dictionary exists, subtract total power receiver noise before scaling (clip to 0)
    # NB: This will break SK!
    if skycal:
        sna, snp, snf = skycal['rcvr_bgd'].shape
        bgd = skycal['rcvr_bgd'].repeat(nt).reshape((sna, snp, snf, nt))
        bgd = bgd[:, :, idx2]  # Extract only frequencies matching the data
        # Reorder axes
        bgd = np.swapaxes(bgd, 0, 2)
        #bslice = bgd[:,:,:,idx]
        #bgnd = np.rollaxis(bslice,3)
        cdata['px'][:, :13, 0] = np.clip(cdata['px'][:, :13, 0] - bgd[:, 0], 0,
                                         None)  #bslice[:,0],0,None)
        cdata['py'][:, :13, 0] = np.clip(cdata['py'][:, :13, 0] - bgd[:, 1], 0,
                                         None)  #bslice[:,1],0,None)
    # Correct the power
    cdata['px'][:, :15, 0] *= antgainf[:, :, 0, idx]
    cdata['py'][:, :15, 0] *= antgainf[:, :, 1, idx]
    # Correct the power-squared
    cdata['px'][:, :15, 1] *= antgainf[:, :, 0, idx]**2
    cdata['py'][:, :15, 1] *= antgainf[:, :, 1, idx]**2
    # Reshape px and py arrays back to original
    cdata['px'].shape = (nf * 16 * 3, nt)
    cdata['py'].shape = (nf * 16 * 3, nt)
    return cdata
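The skycal branches above subtract a receiver background from the total power before the level scaling is applied, clipping at zero so the result cannot go negative. A one-axis sketch with made-up numbers:

import numpy as np

power = np.array([3.0, 1.0, 0.2])      # hypothetical total power vs. frequency
rcvr_bgd = np.array([0.5, 0.5, 0.5])   # hypothetical receiver background
gain_fac = np.array([1.6, 1.6, 1.6])   # hypothetical FEM level correction factor
corrected = np.clip(power - rcvr_bgd, 0, None) * gain_fac   # third channel clips to 0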
Example #6
def autocorrect(out, ant_str='ant1-13', brange=[0, 300]):
    nt = len(out['time'])
    nf = len(out['fghz'])
    pfac1 = (out['p'][:, :, :, :-1] -
             out['p'][:, :, :, 1:]) / out['p'][:, :, :, :-1]
    trange = Time(out['time'][[0, -1]], format='jd')
    src_lev = gc.get_fem_level(trange)  # Read FEM levels from SQL
    # Match times with data
    tidx = nearest_val_idx(out['time'], src_lev['times'].jd)
    # Find attenuation changes
    for ant in range(13):
        for pol in range(2):
            if pol == 0:
                lev = src_lev['hlev'][ant, tidx]
            else:
                lev = src_lev['vlev'][ant, tidx]
            jidx, = np.where(abs(lev[:-1] - lev[1:]) == 1)
            for freq in range(nf):
                idx, = np.where(
                    np.logical_and(
                        abs(pfac1[ant, pol, freq]) > 0.05,
                        abs(pfac1[ant, pol, freq]) < 0.95))
                for i in range(len(idx)):
                    if idx[i] in jidx or idx[i] in jidx - 1:
                        out['p'][ant, pol, freq, idx[i] +
                                 1:] /= (1 - pfac1[ant, pol, freq, idx[i]])
    # Time of total power calibration is 20 UT on the date given
    tptime = Time(np.floor(trange[0].mjd) + 20. / 24., format='mjd')
    calfac = pc.get_calfac(tptime)
    tpcalfac = calfac['tpcalfac']
    tpoffsun = calfac['tpoffsun']
    hlev = src_lev['hlev'][:13, 0]
    vlev = src_lev['vlev'][:13, 0]
    attn_dict = ac.read_attncal(trange[0])[0]  # Read GCAL attn from SQL
    attn = np.zeros((13, 2, nf))
    for i in range(13):
        attn[i, 0] = attn_dict['attn'][hlev[i], 0, 0]
        attn[i, 1] = attn_dict['attn'][vlev[i], 0, 1]
        print 'Ant', i + 1, attn[i, 0, 20], attn[i, 1, 20]
    attnfac = 10**(attn / 10.)
    for i in range(13):
        print attnfac[i, 0, 20], attnfac[i, 1, 20]
    for i in range(nt):
        out['p'][:13, :, :,
                 i] = (out['p'][:13, :, :, i] * attnfac - tpoffsun) * tpcalfac
    antlist = ant_str2list(ant_str)
    bg = np.zeros_like(out['p'])
    # Subtract background for each antenna/polarization
    for ant in antlist:
        for pol in range(2):
            bg[ant,
               pol] = np.median(out['p'][ant, pol, :, brange[0]:brange[1]],
                                1).repeat(nt).reshape(nf, nt)
            #out['p'][ant,pol] -= bg
    # Form median over antennas/pols
    med = np.mean(np.median((out['p'] - bg)[antlist], 0), 0)
    # Do background subtraction once more for good measure
    bgd = np.median(med[:, brange[0]:brange[1]], 1).repeat(nt).reshape(nf, nt)
    med -= bgd
    pdata = np.log10(med)
    f, ax = plt.subplots(1, 1)
    vmax = np.median(np.nanmax(pdata, 1))
    im = ax.pcolormesh(Time(out['time'], format='jd').plot_date,
                       out['fghz'],
                       pdata,
                       vmin=1,
                       vmax=vmax)
    ax.axvspan(Time(out['time'][brange[0]], format='jd').plot_date,
               Time(out['time'][brange[1]], format='jd').plot_date,
               color='w',
               alpha=0.3)
    cbar = plt.colorbar(im, ax=ax)
    cbar.set_label('Log Flux Density [sfu]')
    ax.xaxis_date()
    ax.xaxis.set_major_formatter(DateFormatter("%H:%M"))
    ax.set_ylim(out['fghz'][0], out['fghz'][-1])
    ax.set_xlabel('Time [UT]')
    ax.set_ylabel('Frequency [GHz]')
    return {'caldata': out, 'med_sub': med, 'bgd': bg}
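For the display step above, the median over the quiet window brange is taken per frequency and removed from every time sample before taking log10. A small sketch with hypothetical shapes (the clip is added here only to keep log10 defined in the toy case):

import numpy as np

nf, nt = 4, 10
spec = np.random.rand(nf, nt) + 5.0                # hypothetical dynamic spectrum (nf x nt)
brange = [0, 3]                                    # quiet window used as background
bgd = np.median(spec[:, brange[0]:brange[1]], 1).repeat(nt).reshape(nf, nt)
pdata = np.log10(np.clip(spec - bgd, 1e-3, None))  # log scale for plotting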