def plot_survey_data_and_metadata(fig,
                                  S_data,
                                  plot_map=False,
                                  bus_timestamps=False,
                                  t1=None,
                                  t2=None,
                                  line_plots=[
                                      'Lshell', 'altitude', 'velocity', 'lat',
                                      'lon', 'solution_status', 'solution_type'
                                  ],
                                  show_plots=False,
                                  lshell_file='resources/Lshell_dict.pkl',
                                  cal_file=None,
                                  E_gain=False,
                                  B_gain=False):

    logger = logging.getLogger()

    if plot_map:
        from mpl_toolkits.basemap import Basemap
        from scipy.interpolate import interp1d, interp2d

    if plot_map or (len(line_plots) > 1):
        # The full plot:
        gs_root = GS.GridSpec(2,
                              2,
                              height_ratios=[1, 2],
                              width_ratios=[1, 1.5],
                              wspace=0.15,
                              hspace=0.05,
                              figure=fig)
        gs_data = GS.GridSpecFromSubplotSpec(2,
                                             2,
                                             height_ratios=[0.2, 10],
                                             width_ratios=[20, 0.5],
                                             wspace=0.025,
                                             hspace=0.025,
                                             subplot_spec=gs_root[:, 1])
        m_ax = fig.add_subplot(gs_root[0, 0])
    else:
        gs_data = GS.GridSpec(2,
                              2,
                              width_ratios=[20, 1],
                              wspace=0.05,
                              hspace=0.05,
                              figure=fig)

    # colormap -- parula is a clone of the Matlab colormap; also try plt.cm.jet or plt.cm.viridis
    cm = parula()
    #plt.cm.viridis;

    # Sort by header timestamps
    S_data = sorted(S_data, key=lambda f: f['header_timestamp'])

    # Subset of data with GPS stamps included.
    # We need these for the line plots, regardless of whether we're using payload or bus timestamps.
    # Also confirm that we have at least one field from BESTPOS and BESTVEL messages,
    # since on rare occasions we miss one or the other.
    S_with_GPS = list(
        filter(
            lambda x:
            (('GPS' in x) and ('timestamp' in x['GPS'][0]) and
             ('lat' in x['GPS'][0]) and ('horiz_speed' in x['GPS'][0])),
            S_data))
    S_with_GPS = sorted(S_with_GPS, key=lambda f: f['GPS'][0]['timestamp'])

    logger.info(f'{len(S_with_GPS)} GPS packets')
    T_gps = np.array([x['GPS'][0]['timestamp'] for x in S_with_GPS])
    dts_gps = np.array([
        datetime.datetime.fromtimestamp(x, tz=datetime.timezone.utc)
        for x in T_gps
    ])

    # Build arrays
    E = []
    B = []
    T = []
    F = np.arange(512) * 40 / 512  # 512 survey frequency bins spanning 0-40 kHz

    # Only plot survey data if we have GPS data to match
    if bus_timestamps:
        logger.info('Using bus timestamps')
        # Sort using bus timestamp (finer resolution, but
        # includes transmission error from payload to bus)
        for S in S_data:
            T.append(S['header_timestamp'])
            E.append(S['E_data'])
            B.append(S['B_data'])
    else:
        logger.info('using payload timestamps')
        # Sort using payload GPS timestamp (rounded to the nearest second).
        # Ugh, why didn't we just save a local microsecond counter... do that on CANVAS please.
        for S in S_with_GPS:
            T.append(S['GPS'][0]['timestamp'])

            B.append(S['B_data'])

            # cal info
            gain = S['gain']
            survey_type = S['survey_type']

            if gain == 'high':
                gain_f = 1
            else:  # low gain
                gain_f = 10
            if survey_type == 'short':
                shift = 55
            else:  # long survey
                shift = 64
            #print(shift)

            # append the calibrated data
            E.append((10 * np.log10(gain_f * 2**(S['E_data'] / 8))) - shift)
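            # Worked example of the line above (values assumed for illustration only):
            # a raw E bin of 100 counts in high gain during a long survey gives
            #   10*log10(1 * 2**(100/8)) - 64 ~ 37.6 - 64 ~ -26.4 dB,
            # which sits inside the e_clims color range of [-40, 10] used below.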

    T = np.array(T)

    dates = np.array([datetime.datetime.utcfromtimestamp(t) for t in T])

    if t1 is None:
        t1 = dates[0]
    if t2 is None:
        t2 = dates[-1]
    # -----------------------------------
    # Spectrograms
    # -----------------------------------
    E = np.array(E)
    B = np.array(B)
    T = np.array(T)
    logger.debug(f'E has shape {np.shape(E)}, B has shape {np.shape(B)}')

    # gs_data = GS.GridSpec(2, 2, width_ratios=[20, 1], wspace = 0.05, hspace = 0.05, subplot_spec=gs_root[1])
    # ax1 = fig.add_subplot(gs_data[0,0])
    ax2 = fig.add_subplot(gs_data[1, 0])  # sharex=ax1, sharey=ax1)
    # e_cbax = fig.add_subplot(gs_data[0,1])
    e_cbax = fig.add_subplot(gs_data[1, 1])

    e_clims = [-40, 10]  #[0,255] #[-80,-40]
    # b_clims = [150,255] #[0,255] #[-80,-40]

    date_edges = np.insert(dates, 0, dates[0] - datetime.timedelta(seconds=26))

    # Insert columns of NaNs wherever we have gaps in data (dt > 27 sec)
    per_sec = 26  # Might want to look this up for the shorter survey modes
    gaps = np.where(
        np.diff(date_edges) > datetime.timedelta(seconds=(per_sec + 2)))[0]

    d_gapped = np.insert(dates, gaps,
                         dates[gaps] - datetime.timedelta(seconds=per_sec + 3))
    E_gapped = np.insert(E.astype('float'),
                         gaps - 1,
                         np.nan * np.ones([1, 512]),
                         axis=0)
    B_gapped = np.insert(B.astype('float'),
                         gaps - 1,
                         np.nan * np.ones([1, 512]),
                         axis=0)

    # Plot E data
    # p1 = ax1.pcolormesh(d_gapped,F,E_gapped.T, vmin=e_clims[0], vmax=e_clims[1], shading='flat', cmap = cm);
    p2 = ax2.pcolormesh(d_gapped,
                        F,
                        E_gapped.T,
                        vmin=e_clims[0],
                        vmax=e_clims[1],
                        shading='flat',
                        cmap=cm)
    # cb1 = fig.colorbar(p1, cax = e_cbax)
    cb2 = fig.colorbar(p2, cax=e_cbax)
    # cb1.set_label(f'Raw value [{e_clims[0]}-{e_clims[1]}]')
    cb2.set_label('dB[(uV/m)^2/Hz]')

    # # vertical lines at each edge (kinda nice, but messy for big plots)
    # g1 = ax1.vlines(dates, 0, 40, linewidth=0.2, alpha=0.5, color='w')
    # g2 = ax2.vlines(dates, 0, 40, linewidth=0.2, alpha=0.5, color='w')

    ax2.set_xticklabels([])
    # ax1.set_ylim([0,40])
    ax2.set_ylim([0, 40])

    formatter = mdates.DateFormatter('%H:%M:%S')
    ax2.xaxis.set_major_formatter(formatter)
    fig.autofmt_xdate()
    ax2.set_xlabel(
        "Time (H:M:S) on \n%s" %
        datetime.datetime.utcfromtimestamp(T[0]).strftime("%Y-%m-%d"))
    # ax2.set_xlabel("Time (H:M:S)")

    # ax1.set_ylabel('E channel\nFrequency [kHz]')
    ax2.set_ylabel('E channel Frequency [kHz]')

    # -----------------------------------
    # Ground track Map
    # -----------------------------------

    if plot_map:
        m = Basemap(projection='mill',
                    lon_0=0,
                    ax=m_ax,
                    llcrnrlon=-180,
                    llcrnrlat=-70,
                    urcrnrlon=180,
                    urcrnrlat=70)
        lats = [x['GPS'][0]['lat'] for x in S_with_GPS]
        lons = [x['GPS'][0]['lon'] for x in S_with_GPS]

        sx, sy = m(lons, lats)

        m.drawcoastlines(color='k', linewidth=1, ax=m_ax)
        m.drawparallels(np.arange(-90, 90, 30), labels=[1, 0, 0, 0])
        m.drawmeridians(np.arange(m.lonmin, m.lonmax + 30, 60),
                        labels=[0, 0, 1, 0])
        m.drawmapboundary(fill_color='cyan')
        m.fillcontinents(color='white', lake_color='cyan')

        TX_file = 'resources/nb_transmitters.conf'
        show_transmitters_survey(m, TX_file)  # add dots for tx

        # This is sloppy -- we need to stash the scatterplot in a persistent object,
        # but because this is just a script and not a class, it vanishes. So we're
        # sticking it into the top figure for now. (This is so we can update the point
        # visibility when zooming in and out in the GUI)
        m_ax.s = m.scatter(sx,
                           sy,
                           c=T_gps,
                           marker='.',
                           s=10,
                           cmap=get_cmap('plasma'),
                           zorder=100,
                           picker=5)

        hits = np.where(dates >= datetime.datetime(1979, 1, 1, 0, 0, 0))  # initialize to all points; onzoom() recomputes its own selection

        # Enable click events on the map:
        def onpick(event):
            ''' Event handler for a point click '''
            ind = event.ind
            t_center = dates[ind[0]]
            logger.info(f't = {t_center}')
            ax2.set_xlim(t_center - datetime.timedelta(minutes=15),
                         t_center + datetime.timedelta(minutes=15))
            onzoom(ax2)
            fig.canvas.draw()

        def onzoom(axis, *args, **kwargs):
            # Update the map to only show points within range:
            [tt1, tt2] = axis.get_xlim()
            d1 = mdates.num2date(tt1)
            d2 = mdates.num2date(tt2)
            hits = np.where((dts_gps >= d1) & (dts_gps <= d2))[0]

            logger.debug(f'zoomed to {d1}, {d2} ({len(hits)} hits)')
            try:
                m_ax.s.remove()
            except:
                logger.debug('failed to remove scatter points')

            m_ax.s = m.scatter(np.array(sx)[hits],
                               np.array(sy)[hits],
                               c=T_gps[hits],
                               marker='.',
                               s=10,
                               cmap=get_cmap('plasma'),
                               zorder=100,
                               picker=5)

        # Attach callback
        # ax1.callbacks.connect('xlim_changed', onzoom)
        ax2.callbacks.connect('xlim_changed', onzoom)

        cid = fig.canvas.mpl_connect('pick_event', lambda event: onpick(event))
    # -----------------------------------
    # Line plots
    # -----------------------------------
    #fig.show()
    return fig
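A minimal usage sketch for the variant above (hedged: it assumes the plotting module's own top-level imports -- numpy, matplotlib's gridspec as GS, mdates, parula -- are available, a Matplotlib version that still accepts pcolormesh(..., shading='flat') with same-size coordinate arrays, and a packet layout inferred from the dictionary keys the function reads; real S_data would come from the mission's packet decoder, not shown here):

import datetime
import logging
import numpy as np
import matplotlib.pyplot as plt

logging.basicConfig(level=logging.INFO)

# Two synthetic survey packets, 26 s apart, carrying only the fields the function reads.
t0 = datetime.datetime(2020, 6, 28, 15, 0, tzinfo=datetime.timezone.utc).timestamp()
S_data = [{'header_timestamp': t0 + 26 * k,
           'E_data': np.random.randint(0, 255, 512),
           'B_data': np.random.randint(0, 255, 512),
           'gain': 'high',
           'survey_type': 'long',
           'GPS': [{'timestamp': t0 + 26 * k, 'lat': 0.0, 'lon': 0.0, 'alt': 480e3,
                    'horiz_speed': 7500.0, 'vert_speed': 0.0}]}
          for k in range(2)]

fig = plt.figure(figsize=(12, 6))
fig = plot_survey_data_and_metadata(fig, S_data,
                                    plot_map=False,   # skip the Basemap ground track
                                    line_plots=[])    # spectrogram only
plt.show()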
Example #2
def plot_survey_data_and_metadata(fig,
                                  S_data,
                                  plot_map=True,
                                  bus_timestamps=False,
                                  t1=15,
                                  t2=18,
                                  line_plots=[
                                      'Lshell', 'altitude', 'lat', 'lon',
                                      'solution_status', 'daylight'
                                  ],
                                  show_plots=True,
                                  lshell_file='resources/Lshell_dict.pkl',
                                  cal_file=None,
                                  E_gain=False,
                                  B_gain=False):

    if plot_map or (len(line_plots) > 1):
        # The full plot:
        gs_root = GS.GridSpec(2,
                              2,
                              height_ratios=[1, 2],
                              width_ratios=[1, 1.5],
                              wspace=0.15,
                              hspace=0.05,
                              figure=fig)
        gs_data = GS.GridSpecFromSubplotSpec(2,
                                             2,
                                             height_ratios=[0.2, 10],
                                             width_ratios=[20, 0.5],
                                             wspace=0.025,
                                             hspace=0.025,
                                             subplot_spec=gs_root[:, 1])
        m_ax = fig.add_subplot(gs_root[0, 0])
    else:
        gs_data = GS.GridSpec(2,
                              2,
                              width_ratios=[20, 1],
                              wspace=0.05,
                              hspace=0.05,
                              figure=fig)

    # colormap -- parula is a clone of the Matlab colormap; also try plt.cm.jet or plt.cm.viridis
    cm = parula()
    #plt.cm.viridis;

    # Sort by header timestamps
    S_data = sorted(S_data, key=lambda f: f['header_timestamp'])

    # Subset of data with GPS stamps included.
    # We need these for the line plots, regardless of whether we're using payload or bus timestamps.
    # Also confirm that we have at least one field from BESTPOS and BESTVEL messages,
    # since on rare occasions we miss one or the other.
    S_with_GPS = list(
        filter(
            lambda x:
            (('GPS' in x) and ('timestamp' in x['GPS'][0]) and
             ('lat' in x['GPS'][0]) and ('horiz_speed' in x['GPS'][0])),
            S_data))
    S_with_GPS = sorted(S_with_GPS, key=lambda f: f['GPS'][0]['timestamp'])

    T_gps = np.array([x['GPS'][0]['timestamp'] for x in S_with_GPS])
    dts_gps = np.array([
        datetime.datetime.fromtimestamp(x, tz=datetime.timezone.utc)
        for x in T_gps
    ])

    # Build arrays
    E = []
    B = []
    T = []
    F = np.arange(512) * 40 / 512

    # Only plot survey data if we have GPS data to match
    if bus_timestamps:

        # Sort using bus timestamp (finer resolution, but
        # includes transmission error from payload to bus)
        for S in S_data:
            T.append(S['header_timestamp'])
            E.append(S['E_data'])
            B.append(S['B_data'])
    else:

        # Sort using payload GPS timestamp (rounded to the nearest second).
        # Ugh, why didn't we just save a local microsecond counter... do that on CANVAS please.
        for S in S_with_GPS:
            T.append(S['GPS'][0]['timestamp'])

            B.append(S['B_data'])

            # cal info
            gain = S['gain']
            survey_type = S['survey_type']

            if gain == 'high':
                gain_f = 1
            else:  # low gain
                gain_f = 10
            if survey_type == 'short':
                shift = 55
            else:  # long survey
                shift = 64
            #print(shift)

            # append the calibrated data
            E.append((10 * np.log10(gain_f * 2**(S['E_data'] / 8))) - shift)

    T = np.array(T)

    dates = np.array([datetime.datetime.utcfromtimestamp(t) for t in T])

    if t1 is None:
        t1 = dates[0]
    if t2 is None:
        t2 = dates[-1]
    # -----------------------------------
    # Spectrograms
    # -----------------------------------
    E = np.array(E)
    B = np.array(B)
    T = np.array(T)

    # gs_data = GS.GridSpec(2, 2, width_ratios=[20, 1], wspace = 0.05, hspace = 0.05, subplot_spec=gs_root[1])
    # ax1 = fig.add_subplot(gs_data[0,0])
    ax2 = fig.add_subplot(gs_data[1, 0])  # sharex=ax1, sharey=ax1)
    # e_cbax = fig.add_subplot(gs_data[0,1])
    e_cbax = fig.add_subplot(gs_data[1, 1])

    e_clims = [-40, 10]  #[0,255] #[-80,-40]
    # b_clims = [150,255] #[0,255] #[-80,-40]

    date_edges = np.insert(dates, 0, dates[0] - datetime.timedelta(seconds=26))

    # Insert columns of NaNs wherever we have gaps in data (dt > 27 sec)
    per_sec = 26  # Might want to look this up for the shorter survey modes
    gaps = np.where(
        np.diff(date_edges) > datetime.timedelta(seconds=(per_sec + 2)))[0]

    d_gapped = np.insert(dates, gaps,
                         dates[gaps] - datetime.timedelta(seconds=per_sec + 3))
    E_gapped = np.insert(E.astype('float'),
                         gaps - 1,
                         np.nan * np.ones([1, 512]),
                         axis=0)
    B_gapped = np.insert(B.astype('float'),
                         gaps - 1,
                         np.nan * np.ones([1, 512]),
                         axis=0)

    # Plot E data
    # p1 = ax1.pcolormesh(d_gapped,F,E_gapped.T, vmin=e_clims[0], vmax=e_clims[1], shading='flat', cmap = cm);
    p2 = ax2.pcolormesh(d_gapped,
                        F,
                        E_gapped.T,
                        vmin=e_clims[0],
                        vmax=e_clims[1],
                        shading='flat',
                        cmap=cm)
    # cb1 = fig.colorbar(p1, cax = e_cbax)
    cb2 = fig.colorbar(p2, cax=e_cbax)
    # cb1.set_label(f'Raw value [{e_clims[0]}-{e_clims[1]}]')
    cb2.set_label('dB[(uV/m)^2/Hz]')

    # # vertical lines at each edge (kinda nice, but messy for big plots)
    # g1 = ax1.vlines(dates, 0, 40, linewidth=0.2, alpha=0.5, color='w')
    # g2 = ax2.vlines(dates, 0, 40, linewidth=0.2, alpha=0.5, color='w')

    ax2.set_xticklabels([])
    # ax1.set_ylim([0,40])
    ax2.set_ylim([0, 40])

    formatter = mdates.DateFormatter('%H:%M:%S')
    ax2.xaxis.set_major_formatter(formatter)
    fig.autofmt_xdate()
    ax2.set_xlabel(
        "Time (H:M:S) on \n%s" %
        datetime.datetime.utcfromtimestamp(T[0]).strftime("%Y-%m-%d"))
    # ax2.set_xlabel("Time (H:M:S)")

    # ax1.set_ylabel('E channel\nFrequency [kHz]')
    ax2.set_ylabel('E channel Frequency [kHz]')

    # -----------------------------------
    # Ground track Map
    # -----------------------------------

    if plot_map:
        m = Basemap(projection='mill',
                    lon_0=0,
                    ax=m_ax,
                    llcrnrlon=-180,
                    llcrnrlat=-70,
                    urcrnrlon=180,
                    urcrnrlat=70)
        lats = [x['GPS'][0]['lat'] for x in S_with_GPS]
        lons = [x['GPS'][0]['lon'] for x in S_with_GPS]

        sx, sy = m(lons, lats)

        m.drawcoastlines(color='k', linewidth=1, ax=m_ax)
        m.drawparallels(np.arange(-90, 90, 30), labels=[1, 0, 0, 0])
        m.drawmeridians(np.arange(m.lonmin, m.lonmax + 30, 60),
                        labels=[0, 0, 1, 0])
        m.drawmapboundary(fill_color='cyan')
        m.fillcontinents(color='white', lake_color='cyan')

        TX_file = 'resources/nb_transmitters.conf'
        show_transmitters_survey(m, TX_file)  # add dots for tx

        # This is sloppy -- we need to stash the scatterplot in a persistent object,
        # but because this is just a script and not a class, it vanishes. So we're
        # sticking it into the top figure for now. (This is so we can update the point
        # visibility when zooming in and out in the GUI)
        m_ax.s = m.scatter(sx,
                           sy,
                           c=T_gps,
                           marker='.',
                           s=10,
                           cmap=get_cmap('plasma'),
                           zorder=100,
                           picker=5)

        hits = np.where(dates >= datetime.datetime(1979, 1, 1, 0, 0, 0))  # initialize to all points; onzoom() recomputes its own selection

        # Enable click events on the map:
        def onpick(event):
            ''' Event handler for a point click '''
            ind = event.ind
            t_center = dates[ind[0]]

            ax_lines[-1].set_xlim(t_center - datetime.timedelta(minutes=15),
                                  t_center + datetime.timedelta(minutes=15))
            onzoom(ax2)
            fig.canvas.draw()

        def onzoom(axis, *args, **kwargs):
            # Update the map to only show points within range:
            [tt1, tt2] = axis.get_xlim()
            d1 = mdates.num2date(tt1)
            d2 = mdates.num2date(tt2)
            hits = np.where((dts_gps >= d1) & (dts_gps <= d2))[0]

            try:
                m_ax.s.remove()
            except:
                pass

            m_ax.s = m.scatter(np.array(sx)[hits],
                               np.array(sy)[hits],
                               c=T_gps[hits],
                               marker='.',
                               s=10,
                               cmap=get_cmap('plasma'),
                               zorder=100,
                               picker=5)

        # Attach callback
        # ax1.callbacks.connect('xlim_changed', onzoom)
        ax2.callbacks.connect('xlim_changed', onzoom)

        cid = fig.canvas.mpl_connect('pick_event', lambda event: onpick(event))

    if len(line_plots) > 0:
        gs_lineplots = GS.GridSpecFromSubplotSpec(len(line_plots),
                                                  1,
                                                  hspace=0.5,
                                                  subplot_spec=gs_root[1, 0])

        ax_lines = []

        for ind, a in enumerate(line_plots):
            ax_lines.append(fig.add_subplot(gs_lineplots[ind]))

        markersize = 4
        markerface = '.'
        markeralpha = 0.6

        for ind, a in enumerate(line_plots):

            if a in S_with_GPS[0]['GPS'][0]:
                yvals = np.array([x['GPS'][0][a] for x in S_with_GPS])
                ax_lines[ind].plot(dts_gps,
                                   yvals,
                                   markerface,
                                   markersize=markersize,
                                   label=a,
                                   alpha=markeralpha)
                ax_lines[ind].set_ylabel(a, rotation=0, labelpad=30)
            elif a == 'altitude':
                yvals = np.array([x['GPS'][0]['alt']
                                  for x in S_with_GPS]) / 1000.
                ax_lines[ind].plot(dts_gps,
                                   yvals,
                                   markerface,
                                   markersize=markersize,
                                   label=a,
                                   alpha=markeralpha)
                ax_lines[ind].set_ylabel('Altitude\n[km]',
                                         rotation=0,
                                         labelpad=30)
                ax_lines[ind].set_ylim([450, 500])
            elif a == 'dt':
                ax_lines[ind].plot(dts_gps,
                                   T - T_gps,
                                   markerface,
                                   markersize=markersize,
                                   label=a,
                                   alpha=markeralpha)
                ax_lines[ind].set_ylabel(r't$_{header}$ - t$_{GPS}$',
                                         rotation=0,
                                         labelpad=30)
            elif a == 'velocity':
                v_horiz = np.array(
                    [x['GPS'][0]['horiz_speed'] for x in S_with_GPS])
                v_vert = np.array(
                    [x['GPS'][0]['vert_speed'] for x in S_with_GPS])
                vel = np.sqrt(v_horiz * v_horiz + v_vert * v_vert) / 1000.

                ax_lines[ind].plot(dts_gps,
                                   vel,
                                   markerface,
                                   markersize=markersize,
                                   alpha=markeralpha,
                                   label='Velocity')
                ax_lines[ind].set_ylabel('Velocity\n[km/sec]',
                                         rotation=0,
                                         labelpad=30)
                ax_lines[ind].set_ylim([5, 10])
            elif a == 'Lshell':
                try:
                    # This way using a precomputed lookup table:
                    with open(lshell_file, 'rb') as file:
                        Ldict = pickle.load(file)
                    L_interp = interp2d(Ldict['glon'],
                                        Ldict['glat'],
                                        Ldict['L'],
                                        kind='cubic')
                    Lshell = np.array(
                        [L_interp(x, y) for x, y in zip(lons, lats)])

                    ax_lines[ind].plot(dts_gps,
                                       Lshell,
                                       markerface,
                                       markersize=markersize,
                                       alpha=markeralpha,
                                       label='L shell')
                    ax_lines[ind].set_ylabel('L shell',
                                             rotation=0,
                                             labelpad=30)
                    ax_lines[ind].set_ylim([1, 8])
                except:
                    pass
            elif a == 'daylight':
                # Day or night based on ground track, using the daynight terminator from Basemap
                dayvec = np.array(
                    [is_day(x, y, z) for x, y, z in zip(dts_gps, lats, lons)])
                ax_lines[ind].plot(dts_gps,
                                   dayvec,
                                   markerface,
                                   markersize=markersize,
                                   alpha=markeralpha,
                                   label='Day / Night')
                ax_lines[ind].set_yticks([False, True])
                ax_lines[ind].set_yticklabels(['Night', 'Day'])

        fig.autofmt_xdate()

        for a in ax_lines[:-1]:
            a.set_xticklabels([])

        # Link line plot x axes:
        for a in ax_lines:
            ax_lines[0].get_shared_x_axes().join(ax_lines[0], a)

        # Link data x axes:
        #ax_lines[0].get_shared_x_axes().join(ax_lines[0], ax1)
        ax_lines[0].get_shared_x_axes().join(ax_lines[0], ax2)

        ax_lines[-1].set_xticklabels(ax_lines[-1].get_xticklabels(),
                                     rotation=30)
        ax_lines[-1].xaxis.set_major_formatter(formatter)
        ax_lines[-1].set_xlabel(
            "Time (H:M:S) on \n%s" %
            datetime.datetime.utcfromtimestamp(T[0]).strftime("%Y-%m-%d"))

        # NOTE: the day and the 15:00--18:00 UT window are hard-coded here for a specific pass
        day = datetime.datetime.strptime('VPM_survey_data_2020-06-28.xml',
                                         'VPM_survey_data_%Y-%m-%d.xml')
        d1 = day + datetime.timedelta(hours=15, minutes=0)
        d2 = day + datetime.timedelta(hours=18, minutes=0)
        ax_lines[-1].set_xlim([d1, d2])

    fig.suptitle(f"VPM Survey Data: {day.strftime('%D')}\n" +
                 f"{d1.strftime('%H:%M:%S')} -- {d2.strftime('%H:%M:%S')} UT")
    rasterize_list = [p2]
    rasterize_and_save(
        '/Users/rileyannereid/macworkspace/VPM_data/issues/survey.svg',
        rasterize_list,
        dpi=300)

    # -----------------------------------

    return fig
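The 'Lshell' line plot in this variant reads a pickled lookup table (resources/Lshell_dict.pkl by default) with keys 'glon', 'glat', and 'L', where 'L' is gridded as (len(glat), len(glon)) so it can feed scipy's interp2d as called above. A hedged sketch of writing a table in that format, using a simple dipole approximation purely as a placeholder (a real table would come from a geomagnetic field model):

import os
import pickle
import numpy as np

def dipole_L(glat_deg):
    # Placeholder: centered-dipole L ~ 1 / cos^2(latitude), using geographic
    # latitude as a rough stand-in for magnetic latitude.
    return 1.0 / np.cos(np.deg2rad(glat_deg))**2

glon = np.arange(-180.0, 181.0, 5.0)
glat = np.arange(-70.0, 71.0, 5.0)
L = np.array([[dipole_L(lat) for _ in glon] for lat in glat])  # shape (len(glat), len(glon))

os.makedirs('resources', exist_ok=True)
with open('resources/Lshell_dict.pkl', 'wb') as f:
    pickle.dump({'glon': glon, 'glat': glat, 'L': L}, f)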
Example #3
def plot_burst_FD(fig, burst, cal_data=None):
    logger = logging.getLogger('plot_burst_FD')

    logger.debug(burst['config'])
    cfg = burst['config']

    logger.info(f'burst configuration: {cfg}')

    system_delay_samps_TD = 73
    system_delay_samps_FD = 200
    fs = 80000
    # cm = plt.cm.jet
    cm = parula()
    # This is a mockup of the current Matlab colormap (which is proprietary)

    # Check if we have any status packets included -- we'll get
    # the uBBR configuration from these.
    if 'bbr_config' in burst:
        bbr_config = burst['bbr_config']
    elif 'I' in burst:
        logger.debug(f"Found {len(burst['I'])} status packets")
        # Get uBBR config command:
        if 'prev_bbr_command' in burst['I'][0]:
            bbr_config = decode_uBBR_command(burst['I'][0]['prev_bbr_command'])
        else:
            ps = decode_status([burst['I'][0]])
            bbr_config = decode_uBBR_command(ps[0]['prev_bbr_command'])
        logger.debug(f'bbr config is: {bbr_config}')
    else:
        logger.warning(f'No bbr configuration found')
        bbr_config = None

    # ---------- Calibration coefficients ------
    ADC_max_value = 32768.  # 16 bits, twos comp
    ADC_max_volts = 1.0  # ADC saturates at +- 1 volt

    E_coef = ADC_max_volts / ADC_max_value  # [Volts at ADC / ADC bin]
    B_coef = ADC_max_volts / ADC_max_value

    if cal_data and bbr_config:
        td_lims = [-1, 1]
        E_cal_curve = cal_data[('E', bool(bbr_config['E_FILT']),
                                bool(bbr_config['E_GAIN']))]
        B_cal_curve = cal_data[('B', bool(bbr_config['B_FILT']),
                                bool(bbr_config['B_GAIN']))]
        E_coef *= 1000.0 / max(E_cal_curve)  # [(mV/m) / Vadc]
        B_coef *= 1.0 / max(B_cal_curve)  # [(nT) / Vadc]
        E_unit_string = 'mV/m @ Antenna'
        B_unit_string = 'nT'

        logger.debug(f'E calibration coefficient is {E_coef} mV/m per bit')
        logger.debug(f'B calibration coefficient is {B_coef} nT per bit')
    else:
        E_unit_string = 'V @ ADC'
        B_unit_string = 'V @ ADC'

    # Scale the spectrograms -- A perfect sine wave will have ~-3dB amplitude.
    # Scaling covers 14 bits of dynamic range, with a maximum at each channel's theoretical peak
    clims = np.array([-6 * 14, -3])  #[-96, -20]
    e_clims = clims + 20 * np.log10(E_coef * ADC_max_value / ADC_max_volts)
    b_clims = clims + 20 * np.log10(B_coef * ADC_max_value / ADC_max_volts)
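    # (Each ADC bit adds roughly 6 dB of dynamic range, so -6*14 spans the top 14 bits
    # below a full-scale sine at about -3 dB; adding 20*log10(E_coef * ADC_max_value /
    # ADC_max_volts) re-references those limits from ADC counts to the calibrated
    # physical units used in the colorbar labels.)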

    if cfg['TD_FD_SELECT'] == 0:
        # --------- Frequency domain plots  -----------
        # fig = plt.figure()
        gs = GridSpec(2, 2, width_ratios=[20, 1], wspace=0.05, hspace=0.05)
        E_FD = fig.add_subplot(gs[0, 0])
        B_FD = fig.add_subplot(gs[1, 0], sharex=E_FD, sharey=E_FD)
        cb1 = fig.add_subplot(gs[0, 1])
        cb2 = fig.add_subplot(gs[1, 1])

        nfft = 1024

        # Frequency axis
        f_axis = []
        seg_length = nfft / 2 / 16

        for i, v in enumerate(cfg['BINS'][::-1]):
            if v == '1':
                f_axis.append([np.arange(seg_length) + seg_length * i])
        freq_inds = np.array(f_axis).ravel().astype(
            'int')  # stash the row indices here
        f_axis = (40000 / (nfft / 2)) * np.array(f_axis).ravel()
        f_axis_full = np.arange(512) * 40000 / 512

        logger.debug(f"f axis: {len(f_axis)}")

        # E and B are flattened vectors; we need to reshape them into 2d arrays (spectrograms)
        max_E = len(burst['E']) - np.mod(len(burst['E']), len(f_axis))
        E = burst['E'][0:max_E].reshape(int(max_E / len(f_axis)),
                                        len(f_axis)) * E_coef
        E = E.T
        max_B = len(burst['B']) - np.mod(len(burst['B']), len(f_axis))
        B = burst['B'][0:max_B].reshape(int(max_B / len(f_axis)),
                                        len(f_axis)) * B_coef
        B = B.T

        logger.debug(f"E dims: {np.shape(E)}, B dims: {np.shape(B)}")

        # Generate time axis
        scale_factor = nfft / 2. / 80000.

        sec_on = np.round(cfg['FFTS_ON'] * scale_factor)
        sec_off = np.round(cfg['FFTS_OFF'] * scale_factor)

        if cfg['FFTS_OFF'] == 0:
            # GPS packets are taken when stopping data capture -- e.g., at the end of the burst,
            # or transitioning to a "samples off" section. If we're doing back-to-back bursts
            # with no windowing, we'll only have one GPS timestamp instead of burst_pulses.
            max_t_ind = np.shape(E)[1]
            t_inds = np.arange(max_t_ind)
            t_axis_seconds = t_inds * scale_factor
            start_timestamp = datetime.datetime.utcfromtimestamp(
                burst['G'][0]['timestamp']) - datetime.timedelta(
                    seconds=np.round(t_axis_seconds[-1]))
            t_axis_full_seconds = np.arange(
                max_t_ind) * scale_factor + system_delay_samps_FD / fs
            t_axis_full_timestamps = burst['G'][0][
                'timestamp'] - max_t_ind * scale_factor + t_axis_full_seconds

        else:
            t_inds = np.array([(np.arange(cfg['FFTS_ON'])) +
                               (k * (cfg['FFTS_ON'] + cfg['FFTS_OFF']))
                               for k in range(cfg['burst_pulses'])]).ravel()
            max_t_ind = (cfg['FFTS_ON'] +
                         cfg['FFTS_OFF']) * cfg['burst_pulses']
            start_timestamp = datetime.datetime.utcfromtimestamp(
                burst['G'][0]['timestamp']) - datetime.timedelta(
                    seconds=np.round(cfg['FFTS_ON'] * scale_factor))
            t_axis_full_seconds = np.arange(
                max_t_ind) * scale_factor + system_delay_samps_FD / fs
            t_axis_full_timestamps = burst['G'][0]['timestamp'] - cfg[
                'FFTS_ON'] * scale_factor + t_axis_full_seconds

        # Spectrogram color limits
        clims = [-96, 0]

        # Log-scaled magnitudes
        Emag = 20 * np.log10(np.abs(E))
        Emag[np.isinf(Emag)] = -100
        Bmag = 20 * np.log10(np.abs(B))
        Bmag[np.isinf(Bmag)] = -100
        # print(np.max(Emag), np.max(Bmag))
        # Spaced spectrogram -- insert nans (or -120 for a blue background) in the empty spaces
        E_spec_full = -120 * np.ones([max_t_ind, 512])
        B_spec_full = -120 * np.ones([max_t_ind, 512])

        a, b = np.meshgrid(t_inds, freq_inds)

        E_spec_full[a, b] = Emag
        B_spec_full[a, b] = Bmag
        E_spec_full = E_spec_full.T
        B_spec_full = B_spec_full.T

        # Plots!
        pe = E_FD.pcolormesh(t_axis_full_timestamps,
                             f_axis_full / 1000,
                             E_spec_full,
                             cmap=cm,
                             vmin=e_clims[0],
                             vmax=e_clims[1])
        pb = B_FD.pcolormesh(t_axis_full_timestamps,
                             f_axis_full / 1000,
                             B_spec_full,
                             cmap=cm,
                             vmin=b_clims[0],
                             vmax=b_clims[1])

        # Axis labels and ticks. Label the burst start time, and the GPS timestamps.
        xtix = [t_axis_full_timestamps[0]]
        xtix.extend([x['timestamp'] for x in burst['G']])
        minorticks = np.arange(np.ceil(t_axis_full_timestamps[0]),
                               t_axis_full_timestamps[-1],
                               5)  # minor tick marks -- 5 seconds
        E_FD.set_xticks(xtix)
        E_FD.set_xticks(minorticks, minor=True)
        B_FD.set_xticks(xtix)
        B_FD.set_xticks(minorticks, minor=True)
        E_FD.set_xticklabels([])
        B_FD.set_xticklabels([
            datetime.datetime.utcfromtimestamp(x).strftime("%H:%M:%S")
            for x in xtix
        ])

        fig.autofmt_xdate()

        ce = fig.colorbar(pe, cax=cb1)
        cb = fig.colorbar(pb, cax=cb2)

        E_FD.set_ylim([0, 40])
        B_FD.set_ylim([0, 40])

        E_FD.set_ylabel('E\n Frequency [kHz]')
        B_FD.set_ylabel('B\n Frequency [kHz]')

        # ce.set_label('dBFS')
        # cb.set_label('dBFS')
        ce.set_label(f'dB[{E_unit_string}]')
        cb.set_label(f'dB[{B_unit_string}]')

        # B_FD.set_xlabel('Time [sec from start]')
        B_FD.set_xlabel("Time (H:M:S) on \n%s" %
                        start_timestamp.strftime("%Y-%m-%d"))

        # fig.suptitle(f'Burst {ind}\n{start_timestamp}')
        if bbr_config:
            fig.suptitle(
                'Frequency-Domain Burst\n%s - n = %d, %d on / %d off\nE gain = %s, E filter = %s, B gain = %s, B filter = %s'
                % (start_timestamp, cfg['burst_pulses'], sec_on, sec_off,
                   bbr_config['E_GAIN'], bbr_config['E_FILT'],
                   bbr_config['B_GAIN'], bbr_config['B_FILT']))
        else:
            fig.suptitle(
                'Frequency-Domain Burst\n%s - n = %d, %d on / %d off' %
                (start_timestamp, cfg['burst_pulses'], sec_on, sec_off))
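For reference, the frequency axis in the block above is assembled from cfg['BINS'], a 16-character bit string read in reverse, where each '1' enables one 32-bin (nfft/2/16) segment of the 512-bin, 0-40 kHz FFT. A small standalone sketch of that mapping, with an example mask chosen only for illustration:

import numpy as np

nfft = 1024
seg_length = nfft // 2 // 16        # 32 FFT bins per enabled segment
bins = '0000000000000011'           # example mask: only the two lowest segments enabled

f_axis = []
for i, v in enumerate(bins[::-1]):  # reversed, as in plot_burst_FD above
    if v == '1':
        f_axis.append(np.arange(seg_length) + seg_length * i)

freq_inds = np.concatenate(f_axis).astype(int)    # row indices into the full FFT
f_axis_hz = (40000 / (nfft / 2)) * freq_inds      # bin width is 78.125 Hz
print(f_axis_hz[0], f_axis_hz[-1])                # 0.0 ... 4921.875 Hz for this mask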
Example #4
def plot_burst_TD(fig, burst, cal_data=None):

    logger = logging.getLogger("plot_burst_TD")

    # # --------------- Latex Plot Beautification --------------------------
    # fig_width = 10
    # fig_height = 8
    # fig_size =  [fig_width+1,fig_height+1]
    # params = {'backend': 'ps',
    #           'axes.labelsize': 12,
    #           'font.size': 12,
    #           'legend.fontsize': 10,
    #           'xtick.labelsize': 10,
    #           'ytick.labelsize': 10,
    #           'text.usetex': False,
    #           'figure.figsize': fig_size}
    # plt.rcParams.update(params)
    # # --------------- Latex Plot Beautification --------------------------

    # if cal_file:
    #     try:
    #         with open(cal_file,'rb') as file:
    #             logger.debug(f'loading calibration file {cal_file}')
    #             cal_data = pickle.load(file)
    #     except:
    #         logger.warning(f'Failed to load calibration file {cal_file}')
    #         cal_file = None

    # A list of bursts!
    # for ind, burst in enumerate(B_data):
    # for burst in [B_data[1]]:
    cfg = burst['config']

    logger.info(f'burst configuration: {cfg}')

    system_delay_samps_TD = 73
    system_delay_samps_FD = 200
    fs = 80000
    # cm = plt.cm.jet
    cm = parula()
    # This is a mockup of the current Matlab colormap (which is proprietary)

    # Check if we have any status packets included -- we'll get
    # the uBBR configuration from these.
    if 'bbr_config' in burst:
        bbr_config = burst['bbr_config']
    elif 'I' in burst:
        logger.debug(f"Found {len(burst['I'])} status packets")
        # Get uBBR config command:
        if 'prev_bbr_command' in burst['I'][0]:
            bbr_config = decode_uBBR_command(burst['I'][0]['prev_bbr_command'])
        else:
            ps = decode_status([burst['I'][0]])
            bbr_config = decode_uBBR_command(ps[0]['prev_bbr_command'])
        logger.debug(f'bbr config is: {bbr_config}')
    else:
        logger.warning(f'No bbr configuration found')
        bbr_config = None

    # ---------- Calibration coefficients ------
    ADC_max_value = 32768.  # 16 bits, twos comp
    ADC_max_volts = 1.0  # ADC saturates at +- 1 volt

    E_coef = ADC_max_volts / ADC_max_value  # [Volts at ADC / ADC bin]
    B_coef = ADC_max_volts / ADC_max_value

    if cal_data and bbr_config:
        td_lims = [-1, 1]
        E_cal_curve = cal_data[('E', bool(bbr_config['E_FILT']),
                                bool(bbr_config['E_GAIN']))]
        B_cal_curve = cal_data[('B', bool(bbr_config['B_FILT']),
                                bool(bbr_config['B_GAIN']))]
        E_coef *= 1000.0 / max(E_cal_curve)  # [(mV/m) / Vadc]
        B_coef *= 1.0 / max(B_cal_curve)  # [(nT) / Vadc]
        E_unit_string = 'mV/m @ Antenna'
        B_unit_string = 'nT'

        logger.debug(f'E calibration coefficient is {E_coef} mV/m per bit')
        logger.debug(f'B calibration coefficient is {B_coef} nT per bit')
    else:
        E_unit_string = 'V @ ADC'
        B_unit_string = 'V @ ADC'

    # Scale the spectrograms -- A perfect sine wave will have ~-3dB amplitude.
    # Scaling covers 14 bits of dynamic range, with a maximum at each channel's theoretical peak
    clims = np.array([-6 * 14, -3])  #[-96, -20]
    e_clims = clims + 20 * np.log10(E_coef * ADC_max_value / ADC_max_volts)
    b_clims = clims + 20 * np.log10(B_coef * ADC_max_value / ADC_max_volts)

    # Generate time axis
    if cfg['TD_FD_SELECT'] == 1:
        # --------- Time domain plots  -----------
        # fig = plt.figure()
        gs = GridSpec(2, 3, width_ratios=[20, 20, 1], wspace=0.2, hspace=0.1)
        E_TD = fig.add_subplot(gs[0, 0])
        B_TD = fig.add_subplot(gs[1, 0], sharex=E_TD)
        E_FD = fig.add_subplot(gs[0, 1], sharex=E_TD)
        B_FD = fig.add_subplot(gs[1, 1], sharex=E_FD)
        cb1 = fig.add_subplot(gs[0, 2])
        cb2 = fig.add_subplot(gs[1, 2])

        # Construct the appropriate time and frequency axes
        # Get the equivalent sample rate, if decimated
        if cfg['DECIMATE_ON'] == 1:
            fs_equiv = 80000. / cfg['DECIMATION_FACTOR']
        else:
            fs_equiv = 80000.

        if cfg['SAMPLES_OFF'] == 0:
            max_ind = max(len(burst['E']), len(burst['B']))
            t_axis = np.arange(max_ind) / fs_equiv
        else:

            # Seconds from the start of the burst
            t_axis = np.array([(np.arange(cfg['SAMPLES_ON']))/fs_equiv +\
                          (k*(cfg['SAMPLES_ON'] + cfg['SAMPLES_OFF']))/fs_equiv for k in range(cfg['burst_pulses'])]).ravel()

        # Add in system delay
        t_axis += system_delay_samps_TD / fs_equiv

        # Get the timestamp at the beginning of the burst.
        # GPS timestamps are taken at the end of each contiguous recording.
        # (I think "samples on" is still undecimated, regardless of whether decimation is being used...)
        start_timestamp = datetime.datetime.utcfromtimestamp(
            burst['G'][0]['timestamp']) - datetime.timedelta(
                seconds=float(cfg['SAMPLES_ON'] / fs))

        # the "samples on" and "samples off" values are counting at the full rate, not the decimated rate.
        sec_on = cfg['SAMPLES_ON'] / fs
        sec_off = cfg['SAMPLES_OFF'] / fs

        E_TD.plot(t_axis[0:len(burst['E'])], E_coef * burst['E'])
        B_TD.plot(t_axis[0:len(burst['B'])], B_coef * burst['B'])

        # E_TD.set_ylim(td_lims)
        # B_TD.set_ylim(td_lims)

        nfft = 1024
        overlap = 0.5
        window = 'hann'  # Hann window (named 'hanning' in older SciPy releases)

        if cfg['SAMPLES_OFF'] == 0:
            E_td_spaced = E_coef * burst['E']
            B_td_spaced = B_coef * burst['B']
        else:
            # Insert nans into vector to account for "off" time sections
            E_td_spaced = []
            B_td_spaced = []

            for k in np.arange(cfg['burst_pulses']):
                E_td_spaced.append(E_coef *
                                   burst['E'][k * cfg['SAMPLES_ON']:(k + 1) *
                                              cfg['SAMPLES_ON']])
                E_td_spaced.append(np.ones(cfg['SAMPLES_OFF']) * np.nan)
                B_td_spaced.append(B_coef *
                                   burst['B'][k * cfg['SAMPLES_ON']:(k + 1) *
                                              cfg['SAMPLES_ON']])
                B_td_spaced.append(np.ones(cfg['SAMPLES_OFF']) * np.nan)

            E_td_spaced = np.concatenate(E_td_spaced).ravel()
            B_td_spaced = np.concatenate(B_td_spaced).ravel()

        # E spectrogram -- "spectrum" scaling -> V^2; "density" scaling -> V^2/Hz
        ff, tt, FE = scipy.signal.spectrogram(E_td_spaced,
                                              fs=fs_equiv,
                                              window=window,
                                              nperseg=nfft,
                                              noverlap=nfft * overlap,
                                              mode='psd',
                                              scaling='spectrum')
        E_S_mag = 20 * np.log10(np.sqrt(FE))
        E_S_mag[np.isinf(E_S_mag)] = -100
        logger.debug(f'E data min/max: {np.min(E_S_mag)}, {np.max(E_S_mag)}')
        pe = E_FD.pcolorfast(tt,
                             ff / 1000,
                             E_S_mag,
                             cmap=cm,
                             vmin=e_clims[0],
                             vmax=e_clims[1])
        ce = fig.colorbar(pe, cax=cb1)

        # B spectrogram
        ff, tt, FB = scipy.signal.spectrogram(B_td_spaced,
                                              fs=fs_equiv,
                                              window=window,
                                              nperseg=nfft,
                                              noverlap=nfft * overlap,
                                              mode='psd',
                                              scaling='spectrum')
        B_S_mag = 20 * np.log10(np.sqrt(FB))
        B_S_mag[np.isinf(B_S_mag)] = -100
        logger.debug(f'B data min/max: {np.min(B_S_mag)}, {np.max(B_S_mag)}')
        pb = B_FD.pcolorfast(tt,
                             ff / 1000,
                             B_S_mag,
                             cmap=cm,
                             vmin=b_clims[0],
                             vmax=b_clims[1])
        cb = fig.colorbar(pb, cax=cb2)

        E_TD.set_ylabel(f'E Amplitude\n[{E_unit_string}]')
        B_TD.set_ylabel(f'B Amplitude\n[{B_unit_string}]')
        E_FD.set_ylabel('Frequency [kHz]')
        B_FD.set_ylabel('Frequency [kHz]')
        B_TD.set_xlabel('Time [sec from start]')
        B_FD.set_xlabel('Time [sec from start]')

        ce.set_label(f'dB[{E_unit_string}]')
        cb.set_label(f'dB[{B_unit_string}]')

        if bbr_config:
            fig.suptitle(
                'Time-Domain Burst\n%s - n = %d, %d on / %d off\nE gain = %s, E filter = %s, B gain = %s, B filter = %s'
                % (start_timestamp, cfg['burst_pulses'], sec_on, sec_off,
                   bbr_config['E_GAIN'], bbr_config['E_FILT'],
                   bbr_config['B_GAIN'], bbr_config['B_FILT']))
        else:
            fig.suptitle(
                'Time-Domain Burst\n%s - n = %d, %d on / %d off' %
                (start_timestamp, cfg['burst_pulses'], sec_on, sec_off))
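A minimal usage sketch for the time-domain plotter directly above (hedged: the burst dictionary below contains only the keys this variant reads, with values invented for illustration; the later definition of the same name expects extra keys such as 'CAL' and would shadow this one if both are loaded from a single module):

import datetime
import numpy as np
import matplotlib.pyplot as plt

fs = 80000
cfg = {'TD_FD_SELECT': 1, 'DECIMATE_ON': 0, 'SAMPLES_ON': fs, 'SAMPLES_OFF': 0,
       'burst_pulses': 1}
t = np.arange(cfg['SAMPLES_ON']) / fs
burst = {
    'config': cfg,
    'E': (8000 * np.sin(2 * np.pi * 5e3 * t)).astype(int),  # 5 kHz tone, ~quarter scale
    'B': (3000 * np.sin(2 * np.pi * 1e3 * t)).astype(int),  # 1 kHz tone
    'G': [{'timestamp': datetime.datetime(2020, 6, 28, 15, 0,
                                          tzinfo=datetime.timezone.utc).timestamp()}],
    'bbr_config': {'E_GAIN': 0, 'E_FILT': 0, 'B_GAIN': 0, 'B_FILT': 0},
}

fig = plt.figure(figsize=(12, 6))
plot_burst_TD(fig, burst, cal_data=None)  # uncalibrated: amplitudes shown in V at the ADC
plt.show()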
def plot_burst_TD(fig, burst, cal_data=None):

    logger = logging.getLogger("plot_burst_TD")

    # # --------------- Latex Plot Beautification --------------------------
    # fig_width = 10
    # fig_height = 8
    # fig_size =  [fig_width+1,fig_height+1]
    # params = {'backend': 'ps',
    #           'axes.labelsize': 12,
    #           'font.size': 12,
    #           'legend.fontsize': 10,
    #           'xtick.labelsize': 10,
    #           'ytick.labelsize': 10,
    #           'text.usetex': False,
    #           'figure.figsize': fig_size}
    # plt.rcParams.update(params)
    # # --------------- Latex Plot Beautification --------------------------

    # if cal_file:
    #     try:
    #         with open(cal_file,'rb') as file:
    #             logger.debug(f'loading calibration file {cal_file}')
    #             cal_data = pickle.load(file)
    #     except:
    #         logger.warning(f'Failed to load calibration file {cal_file}')
    #         cal_file = None

    # A list of bursts!
    # for ind, burst in enumerate(B_data):
    # for burst in [B_data[1]]:
    cfg = burst['config']

    logger.info(f'burst configuration: {cfg}')

    system_delay_samps_TD = 73
    system_delay_samps_FD = 200
    fs = 80000
    # cm = plt.cm.jet
    cm = parula()
    # This is a mockup of the current Matlab colormap (which is proprietary)

    # Check if we have any status packets included -- we'll get
    # the uBBR configuration from these.
    if 'bbr_config' in burst:
        bbr_config = burst['bbr_config']
    elif 'I' in burst:
        logger.debug(f"Found {len(burst['I'])} status packets")
        # Get uBBR config command:
        if 'prev_bbr_command' in burst['I'][0]:
            bbr_config = decode_uBBR_command(burst['I'][0]['prev_bbr_command'])
        else:
            ps = decode_status([burst['I'][0]])
            bbr_config = decode_uBBR_command(ps[0]['prev_bbr_command'])
        logger.debug(f'bbr config is: {bbr_config}')
    else:
        logger.warning(f'No bbr configuration found')
        bbr_config = None

    # ---------- Calibration coefficients ------
    """
    ADC_max_value = 32768. # 16 bits, twos comp
    ADC_max_volts = 1.0    # ADC saturates at +- 1 volt

    E_coef = ADC_max_volts/ADC_max_value  # [Volts at ADC / ADC bin]
    B_coef = ADC_max_volts/ADC_max_value

    if cal_data and bbr_config:
        td_lims = [-1, 1]
        E_cal_curve = cal_data[('E',bool(bbr_config['E_FILT']), bool(bbr_config['E_GAIN']))]
        B_cal_curve = cal_data[('B',bool(bbr_config['B_FILT']), bool(bbr_config['B_GAIN']))]
        E_coef *= 1000.0/max(E_cal_curve) # [(mV/m) / Vadc]
        B_coef *= 1.0/max(B_cal_curve) # [(nT) / Vadc]
        E_unit_string = 'mV/m @ Antenna'
        B_unit_string = 'nT'

        logger.debug(f'E calibration coefficient is {E_coef} mV/m per bit')
        logger.debug(f'B calibration coefficient is {B_coef} nT per bit')
    else:
        E_unit_string = 'V @ ADC'
        B_unit_string = 'V @ ADC'
    """
    E_unit_string = 'uV/m'  # now calibrated to uV/m

    # Scale the spectrograms -- A perfect sine wave will have ~-3dB amplitude.
    # Scaling covers 14 bits of dynamic range, with a maximum at each channel's theoretical peak
    #clims = np.array([-6*14, -3]) #[-96, -20]
    #e_clims = clims + 20*np.log10(E_coef*ADC_max_value/ADC_max_volts)
    #b_clims = clims + 20*np.log10(B_coef*ADC_max_value/ADC_max_volts)

    e_clims = np.array([-40, 10])  # for newly calibrated data

    E_coef = burst['CAL']  # calibrate into uV/m units
    #print(E_coef)
    B_coef = 1  # just so we don't have to comment a bunch of stuff out

    # Generate time axis
    if cfg['TD_FD_SELECT'] == 1:
        # --------- Time domain plots  -----------
        # fig = plt.figure()
        fig.set_size_inches(10, 8)
        #gs = GridSpec(2, 2, height_ratios=[1.25,1], wspace = 0.2, hspace = 0.25)
        gs = GridSpec(1, 1)

        #E_TD = fig.add_subplot(gs[0,0])
        # B_TD = fig.add_subplot(gs[1,0], sharex=E_TD)
        E_FD = fig.add_subplot(gs[0])
        # B_FD = fig.add_subplot(gs[1,1], sharex=E_FD)
        # cb1  = fig.add_subplot(gs[0,2])
        # cb2  = fig.add_subplot(gs[1,2])

        # add in burst map to plot -- weird workaround, but it fixed it
        #map_ax = fig.add_subplot(gs[1,0:2])
        #box = map_ax.get_position()
        #box.x0 = box.x0 - 0.13
        #box.x1 = box.x1 - 0.13
        #map_ax.set_position(box)
        #gstr = plot_burst_map(map_ax, burst['G'], burst)

        #fig.text(0.68, 0.12, gstr, fontsize='9') # ha='center', va='bottom')

        # Construct the appropriate time and frequency axes
        # Get the equivalent sample rate, if decimated
        if cfg['DECIMATE_ON'] == 1:
            fs_equiv = 80000. / cfg['DECIMATION_FACTOR']
        else:
            fs_equiv = 80000.

        if cfg['SAMPLES_OFF'] == 0:
            max_ind = max(len(burst['E']), len(burst['B']))
            t_axis = np.arange(max_ind) / fs_equiv
        else:

            # Seconds from the start of the burst
            t_axis = np.array([(np.arange(cfg['SAMPLES_ON']))/fs_equiv +\
                          (k*(cfg['SAMPLES_ON'] + cfg['SAMPLES_OFF']))/fs_equiv for k in range(cfg['burst_pulses'])]).ravel()
        #print(len(burst['E']))

        # Add in system delay
        t_axis += system_delay_samps_TD / fs_equiv

        # Get the timestamp at the beginning of the burst.
        # GPS timestamps are taken at the end of each contiguous recording.
        # (I think "samples on" is still undecimated, regardless of whether decimation is being used...)
        try:
            start_timestamp = datetime.datetime.utcfromtimestamp(
                burst['G'][0]['timestamp']) - datetime.timedelta(
                    seconds=float(cfg['SAMPLES_ON'] / fs))
            #print(start_timestamp)
        except:
            start_timestamp = datetime.datetime.utcfromtimestamp(
                burst['header_timestamp'])

        #if start_timestamp.year == 1980:
        #    # error in GPS?
        #    start_timestamp = datetime.datetime.utcfromtimestamp(burst['header_timestamp'])

        # the "samples on" and "samples off" values are counting at the full rate, not the decimated rate.
        sec_on = cfg['SAMPLES_ON'] / fs
        sec_off = cfg['SAMPLES_OFF'] / fs

        # add in a signal - 20 uV/m
        extra_signal = 0.3 * np.sin(np.array(t_axis) * 2 * np.pi * 3e3)
        #E_TD.plot(t_axis[0:len(burst['E'])], (E_coef*burst['E'])+extra_signal)
        #E_TD.plot(t_axis[0:len(burst['E'])], E_coef*burst['E'][0:len(t_axis)])
        # B_TD.plot(t_axis[0:len(burst['B'])], B_coef*burst['B'])

        # E_TD.set_ylim(td_lims)
        # B_TD.set_ylim(td_lims)

        nfft = 1024
        overlap = 0.5
        window = 'hann'  # Hann window (named 'hanning' in older SciPy releases)

        if cfg['SAMPLES_OFF'] == 0:
            E_td_spaced = E_coef * burst['E']
            B_td_spaced = B_coef * burst['B']
        else:
            # Insert nans into vector to account for "off" time sections
            E_td_spaced = []
            B_td_spaced = []

            for k in np.arange(cfg['burst_pulses']):
                #if k ==2:
                the_data = E_coef * burst['E'] + extra_signal[:len(burst['E'])]
                E_td_spaced.append(the_data[k * cfg['SAMPLES_ON']:(k + 1) *
                                            cfg['SAMPLES_ON']])
                E_td_spaced.append(np.ones(cfg['SAMPLES_OFF']) * np.nan)

                B_td_spaced.append(B_coef *
                                   burst['B'][k * cfg['SAMPLES_ON']:(k + 1) *
                                              cfg['SAMPLES_ON']])
                B_td_spaced.append(np.ones(cfg['SAMPLES_OFF']) * np.nan)

            E_td_spaced = np.concatenate(E_td_spaced).ravel()
            B_td_spaced = np.concatenate(B_td_spaced).ravel()

        # E spectrogram -- "spectrum" scaling -> V^2; "density" scaling -> V^2/Hz
        ff, tt, FE = scipy.signal.spectrogram(
            E_td_spaced,
            fs=fs_equiv,
            window=window,
            nperseg=nfft,
            noverlap=nfft * overlap,
            mode='psd',
            scaling='density')  # changed to density
        E_S_mag = 20 * np.log10(np.sqrt(FE))
        E_S_mag[np.isinf(E_S_mag)] = -100
        logger.debug(f'E data min/max: {np.min(E_S_mag)}, {np.max(E_S_mag)}')
        # pe is the image handle returned by pcolorfast; it feeds the colorbar below
        pe = E_FD.pcolorfast(tt,
                             ff / 1000,
                             E_S_mag,
                             cmap=cm,
                             vmin=e_clims[0],
                             vmax=e_clims[1])
        cax_divider = make_axes_locatable(E_FD)
        ce_ax = cax_divider.append_axes('right', size='7%', pad='5%')
        ce = fig.colorbar(pe, cax=ce_ax)

        # save output
        #print('FE', np.shape(FE))
        #print('tt', np.shape(tt))
        #print('ff', np.shape(ff))

        #np.savetxt('burstE.txt', FE, delimiter=",")
        #np.savetxt('burstT.txt', tt, delimiter=",")
        #np.savetxt('burstF.txt', ff, delimiter=",")
        #print('E', np.shape(Eoutput))

        # B spectrogram
        ff, tt, FB = scipy.signal.spectrogram(B_td_spaced,
                                              fs=fs_equiv,
                                              window=window,
                                              nperseg=nfft,
                                              noverlap=nfft * overlap,
                                              mode='psd',
                                              scaling='spectrum')
        B_S_mag = 20 * np.log10(np.sqrt(FB))
        B_S_mag[np.isinf(B_S_mag)] = -100
        logger.debug(f'B data min/max: {np.min(B_S_mag)}, {np.max(B_S_mag)}')
        # pb = B_FD.pcolorfast(tt,ff/1000, B_S_mag, cmap = cm, vmin=b_clims[0], vmax=b_clims[1])
        # cb = fig.colorbar(pb, cax=cb2)

        #E_TD.set_ylabel(f'E Amplitude\n[{E_unit_string}]')
        # B_TD.set_ylabel(f'B Amplitude\n[{B_unit_string}]')
        E_FD.set_ylabel('Frequency [kHz]')
        # B_FD.set_ylabel('Frequency [kHz]')
        #E_TD.set_xlabel('Time [sec from start]')
        E_FD.set_xlabel('Time [sec from start]')
        #E_TD.set_xlim([0,20])

        ce.set_label(f'dB[(uV/m)^2/Hz]')
        # cb.set_label(f'dB[{B_unit_string}]')

        #if start_timestamp.year == 1980:
        #        start_timestamp = datetime.datetime.utcfromtimestamp(burst['header_timestamp'])
        start_timestamp = start_timestamp.replace(microsecond=0)
        if bbr_config:
            fig.suptitle(
                'VPM Burst Data\n%s - n = %d, %d on / %d off\nE gain = %s, E filter = %s'
                % (start_timestamp, cfg['burst_pulses'], sec_on, sec_off,
                   burst['GAIN'], burst['FILT']))
        else:
            fig.suptitle(
                'VPM Burst Data\n%s - n = %d, %d on / %d off' %
                (start_timestamp, cfg['burst_pulses'], sec_on, sec_off))
        #E_FD.set_xlim([0,6])
        fig.savefig('burst.svg', format='svg')
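One detail worth spelling out in the calibrated variant above: with scaling='density' the spectrogram values FE carry units of (uV/m)^2/Hz, and 20*log10(sqrt(FE)) is algebraically identical to 10*log10(FE), which is why the colorbar is labeled dB[(uV/m)^2/Hz]. A one-line sanity check:

import numpy as np

x = np.array([1e-4, 1.0, 25.0])
assert np.allclose(20 * np.log10(np.sqrt(x)), 10 * np.log10(x))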
def plot_burst_incomplete(fig, burst, cal_data=None):

    logger = logging.getLogger("plot_burst_TD")

    cfg = burst['config']

    logger.info(f'burst configuration: {cfg}')

    system_delay_samps_TD = 73
    system_delay_samps_FD = 200
    fs = 80000
    # cm = plt.cm.jet
    cm = parula()
    # This is a mockup of the current Matlab colormap (which is proprietary)

    # Check if we have any status packets included -- we'll get
    # the uBBR configuration from these.
    if 'bbr_config' in burst:
        bbr_config = burst['bbr_config']
    elif 'I' in burst:
        logger.debug(f"Found {len(burst['I'])} status packets")
        # Get uBBR config command:
        if 'prev_bbr_command' in burst['I'][0]:
            bbr_config = decode_uBBR_command(burst['I'][0]['prev_bbr_command'])
        else:
            ps = decode_status([burst['I'][0]])
            bbr_config = decode_uBBR_command(ps[0]['prev_bbr_command'])
        logger.debug(f'bbr config is: {bbr_config}')
    else:
        logger.warning(f'No bbr configuration found')
        bbr_config = None

    E_unit_string = 'uV/m'  # now calibrated to uV/m

    # Scale the spectrograms -- A perfect sine wave will have ~-3dB amplitude.
    # Scaling covers 14 bits of dynamic range, with a maximum at each channel's theoretical peak
    #clims = np.array([-6*14, -3]) #[-96, -20]
    #e_clims = clims + 20*np.log10(E_coef*ADC_max_value/ADC_max_volts)
    #b_clims = clims + 20*np.log10(B_coef*ADC_max_value/ADC_max_volts)

    e_clims = np.array([-40, 10])  # for newly calibrated data

    E_coef = burst['CAL']  # calibrate into uV/m units
    #print(E_coef)
    B_coef = 1  # just so we don't have to comment a bunch of stuff out

    # Generate time axis
    if cfg['TD_FD_SELECT'] == 1:
        # --------- Time domain plots  -----------
        # fig = plt.figure()
        fig.set_size_inches(10, 8)
        gs = GridSpec(2, 2, height_ratios=[1.25, 1], wspace=0.2, hspace=0.25)
        E_TD = fig.add_subplot(gs[0, 0])
        # B_TD = fig.add_subplot(gs[1,0], sharex=E_TD)
        E_FD = fig.add_subplot(gs[0, 1], sharex=E_TD)
        # B_FD = fig.add_subplot(gs[1,1], sharex=E_FD)
        # cb1  = fig.add_subplot(gs[0,2])
        # cb2  = fig.add_subplot(gs[1,2])

        # add in burst map to plot -- weird workaround, but it fixed it
        map_ax = fig.add_subplot(gs[1, 0:2])
        box = map_ax.get_position()
        box.x0 = box.x0 - 0.13
        box.x1 = box.x1 - 0.13
        map_ax.set_position(box)
        gstr = plot_burst_map(map_ax, burst['G'], burst)

        fig.text(0.68, 0.25, gstr, fontsize='9')  # ha='center', va='bottom')

        # Construct the appropriate time and frequency axes
        # Get the equivalent sample rate, if decimated
        if cfg['DECIMATE_ON'] == 1:
            fs_equiv = 80000. / cfg['DECIMATION_FACTOR']
        else:
            fs_equiv = 80000.

        if cfg['SAMPLES_OFF'] == 0:
            max_ind = max(len(burst['E']), len(burst['B']))
            t_axis = np.arange(max_ind) / fs_equiv
        else:

            # Seconds from the start of the burst
            t_axis = np.array([(np.arange(cfg['SAMPLES_ON']))/fs_equiv +\
                          (k*(cfg['SAMPLES_ON'] + cfg['SAMPLES_OFF']))/fs_equiv for k in range(cfg['burst_pulses'])]).ravel()
        #print(len(burst['E']))

        # Add in system delay
        t_axis += system_delay_samps_TD / fs_equiv

        # Get the timestamp at the beginning of the burst.
        # GPS timestamps are taken at the end of each contiguous recording.
        # (I think "samples on" is still undecimated, regardless of whether decimation is being used...)
        try:
            start_timestamp = datetime.datetime.utcfromtimestamp(
                burst['G'][0]['timestamp']) - datetime.timedelta(
                    seconds=float(cfg['SAMPLES_ON'] / fs))
            #print(start_timestamp)
        except:
            start_timestamp = datetime.datetime.utcfromtimestamp(
                burst['header_timestamp'])

        #if start_timestamp.year == 1980:
        #    # error in GPS?
        #    start_timestamp = datetime.datetime.utcfromtimestamp(burst['header_timestamp'])

        # the "samples on" and "samples off" values are counting at the full rate, not the decimated rate.
        sec_on = cfg['SAMPLES_ON'] / fs
        sec_off = cfg['SAMPLES_OFF'] / fs

        #E_TD.plot(t_axis[0:len(burst['E'])], E_coef*burst['E'])
        E_TD.plot(t_axis[0:len(burst['E'])],
                  E_coef * burst['E'][0:len(t_axis)])
        # B_TD.plot(t_axis[0:len(burst['B'])], B_coef*burst['B'])

        # E_TD.set_ylim(td_lims)
        # B_TD.set_ylim(td_lims)

        nfft = 1024
        overlap = 0.5
        window = 'hann'  # Hann window (named 'hanning' in older SciPy releases)

        if cfg['SAMPLES_OFF'] == 0:
            E_td_spaced = E_coef * burst['E']
            B_td_spaced = B_coef * burst['B']
        else:
            # Insert nans into vector to account for "off" time sections
            E_td_spaced = []
            B_td_spaced = []

            for k in np.arange(cfg['burst_pulses']):
                E_td_spaced.append(E_coef *
                                   burst['E'][k * cfg['SAMPLES_ON']:(k + 1) *
                                              cfg['SAMPLES_ON']])
                E_td_spaced.append(np.ones(cfg['SAMPLES_OFF']) * np.nan)
                B_td_spaced.append(B_coef *
                                   burst['B'][k * cfg['SAMPLES_ON']:(k + 1) *
                                              cfg['SAMPLES_ON']])
                B_td_spaced.append(np.ones(cfg['SAMPLES_OFF']) * np.nan)

            E_td_spaced = np.concatenate(E_td_spaced).ravel()
            B_td_spaced = np.concatenate(B_td_spaced).ravel()

        # E spectrogram -- "spectrum" scaling -> V^2; "density" scaling -> V^2/Hz
        ff, tt, FE = scipy.signal.spectrogram(
            E_td_spaced,
            fs=fs_equiv,
            window=window,
            nperseg=nfft,
            noverlap=nfft * overlap,
            mode='psd',
            scaling='density')  # changed to density
        E_S_mag = 20 * np.log10(np.sqrt(FE))
        E_S_mag[np.isinf(E_S_mag)] = -100

        logger.debug(f'E data min/max: {np.min(E_S_mag)}, {np.max(E_S_mag)}')

        E_FD.plot(ff / 1000, E_S_mag)
        # (alternative: render the spectrum as a pcolorfast spectrogram with a colorbar, kept commented out below)
        #pe = E_FD.pcolorfast(tt,ff/1000,E_S_mag, cmap = cm,  vmin=e_clims[0], vmax=e_clims[1])
        #cax_divider = make_axes_locatable(E_FD)
        #ce_ax = cax_divider.append_axes('right', size='7%', pad='5%')
        #ce = fig.colorbar(pe, cax=ce_ax)

        E_TD.set_ylabel(f'E Amplitude\n[{E_unit_string}]')
        E_FD.set_ylabel('E PSD\n[dB[(uV/m)^2/Hz]]')
        E_TD.set_xlabel('Time [sec from start]')
        E_FD.set_xlabel('Frequency [kHz]')

        #ce.set_label(f'dB[(uV/m)^2/Hz]')

        #if start_timestamp.year == 1980:
        #        start_timestamp = datetime.datetime.utcfromtimestamp(burst['header_timestamp'])
        start_timestamp = start_timestamp.replace(microsecond=0)
        if bbr_config:
            fig.suptitle(
                'VPM Burst Data\n%s - n = %d, %d on / %d off\nE gain = %s, E filter = %s'
                % (start_timestamp, cfg['burst_pulses'], sec_on, sec_off,
                   burst['GAIN'], burst['FILT']))
        else:
            fig.suptitle(
                'VPM Burst Data\n%s - n = %d, %d on / %d off' %
                (start_timestamp, cfg['burst_pulses'], sec_on, sec_off))