def plot_cpu(cpu):
    name = f'CPU{cpu} util'
    series = util_df[cpu].copy(deep=False)
    series.index.name = 'Time'
    series.name = name
    fig = plot_signal(series).options(
        'Curve',
        ylabel='Utilization',
    )

    # The "y" dimension has the name of the series that we plotted
    fig = fig.redim.range(**{name: (-10, 1034)})

    times, utils = zip(*series.items())
    fig *= hv.Overlay([
        hv.VSpan(start, end).options(
            alpha=0.1,
            color='grey',
        ) for util, start, end in zip(
            utils,
            times,
            times[1:],
        ) if not util
    ])
    return fig
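
# Standalone sketch of the same idea, assuming only holoviews and pandas are
# available: shade the intervals where a signal drops to zero with grey VSpans
# behind the curve. The data is synthetic; LISA's plot_signal and util_df from
# the snippet above are not reproduced here.
import holoviews as hv
import pandas as pd

hv.extension('bokeh')

util = pd.Series(
    [512, 0, 0, 768, 256, 0, 640],
    index=[0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
    name='CPU0 util',
)
curve = hv.Curve((util.index, util.values), 'Time', 'Utilization')
idle_bands = hv.Overlay([
    hv.VSpan(start, end).options(alpha=0.1, color='grey')
    for value, start, end in zip(util.values, util.index, util.index[1:])
    if not value
])
fig = curve * idle_bands
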
Example #2
    def plot_phases(self, task: TaskID, wlgen_profile=None):
        """
        Draw the task's phases as colored bands

        :param task: the rt-app task to filter for
        :type task: int or str or lisa.trace.TaskID

        :param wlgen_profile: See :meth:`df_rtapp_loop`
        :type wlgen_profile: dict(str, lisa.wlgen.rta.RTAPhase) or None
        """
        phases_df = self.df_phases(task, wlgen_profile=wlgen_profile)

        try:
            states_df = self.ana.tasks.df_task_states(task)
        except MissingTraceEventError:

            def cpus_of_phase_at(t):
                return []
        else:

            def cpus_of_phase_at(t):
                end = t + phases_df['duration'][t]
                window = (t, end)
                df = df_window(states_df, window, method='pre')
                return sorted(int(x) for x in df['cpu'].unique())

        def make_band(row):
            t = row.name
            end = t + row['duration']
            phase = str(row['phase'])
            return (phase, t, end, cpus_of_phase_at(t))

        # Compute phases intervals
        bands = phases_df.apply(make_band, axis=1)

        def make_label(cpus, phase):
            if cpus:
                cpus = f" (CPUs {', '.join(map(str, cpus))})"
            else:
                cpus = ''
            return f'rt-app phase {phase}{cpus}'

        return hv.Overlay([
            hv.VSpan(start, end, label=make_label(cpus, phase)).options(alpha=0.1)
            for phase, start, end, cpus in bands
        ]).options(title=f'Task {task} phases')
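
# A minimal, self-contained version of the labelled-band pattern above: one
# labelled VSpan per (phase, start, end) interval merged into a titled Overlay.
# The phase names and times are made up.
import holoviews as hv

hv.extension('bokeh')

bands = [('sleep', 0.0, 0.5), ('run', 0.5, 1.2), ('sleep', 1.2, 1.6)]
phases_fig = hv.Overlay([
    hv.VSpan(start, end, label=f'rt-app phase {phase}').options(alpha=0.1)
    for phase, start, end in bands
]).options(title='Task phases')
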
Example #3
        def plot_bands(df, column, label):
            df = df_refit_index(df, window=self.trace.window)
            if df.empty:
                return _hv_neutral()

            return hv.Overlay(
                [
                    hv.VSpan(
                        start,
                        start + duration,
                        label=label,
                    ).options(
                        alpha=0.5,
                    )
                    for start, duration in df[[column]].itertuples()
                ]
            )
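
# Roughly the same itertuples() pattern with a plain DataFrame: the index holds
# the start time and the selected column the duration. The data is invented.
import holoviews as hv
import pandas as pd

hv.extension('bokeh')

df = pd.DataFrame({'duration': [0.2, 0.4]}, index=[1.0, 2.0])
bands_fig = hv.Overlay([
    hv.VSpan(start, start + duration, label='active').options(alpha=0.5)
    for start, duration in df[['duration']].itertuples()
])
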
Example #4
    def plot_annotations(self, **kwargs):
        flag = [
            str(i) == str(self.mission_id)
            for i in self.annotations.mission_id.values
        ]
        rows = self.annotations[flag].iterrows()
        plots = []
        # We remember the first double-click and draw a vertical line if we
        # expect another click to happen
        if self.pending_start:
            plots.append(hv.VLine(self.pending_start).opts(line_dash="dashed"))
        plots.extend([
            hv.VSpan(r["start_clock_ms"], r["end_clock_ms"]).opts(
                color=color_dict.get(r["classification"], "yellow"))
            # hv.Text((r["start_clock_ms"] + r["end_clock_ms"]) / 2, 0.9,
            #         str(r["classification"])).opts(color="red")
            for ix, r in rows
        ])
        return hv.Overlay(plots)
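
# Sketch of the same overlay built from a small annotations table; color_dict
# and the column names mirror the snippet above, but the data is made up.
import holoviews as hv
import pandas as pd

hv.extension('bokeh')

color_dict = {'event': 'red', 'noise': 'grey'}
annotations = pd.DataFrame({
    'start_clock_ms': [100, 400],
    'end_clock_ms': [250, 500],
    'classification': ['event', 'noise'],
})
spans = [
    hv.VSpan(r['start_clock_ms'], r['end_clock_ms']).opts(
        color=color_dict.get(r['classification'], 'yellow'))
    for _, r in annotations.iterrows()
]
annotation_fig = hv.Overlay(spans)
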
Example #5
    def plot_overutilized(self):
        """
        Draw the system's overutilized status as colored bands
        """
        df = self.df_overutilized()
        if not df.empty:
            df = df_refit_index(df, window=self.trace.window)

            # Compute intervals in which the system is reported to be overutilized
            return hv.Overlay([
                hv.VSpan(start, start + delta, label='Overutilized').options(
                    color='red',
                    alpha=0.05,
                ) for start, delta, overutilized in df[
                    ['len', 'overutilized']].itertuples() if overutilized
            ]).options(title='System-wide overutilized status')
        else:
            return _hv_neutral()
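
# The same pattern with a toy DataFrame: one red band per row whose flag is
# set, using the (index, len, flag) tuples that itertuples() yields. The column
# names follow the snippet above; the values are invented.
import holoviews as hv
import pandas as pd

hv.extension('bokeh')

df = pd.DataFrame({'len': [0.5, 0.3], 'overutilized': [1, 0]}, index=[0.0, 1.0])
overutilized_fig = hv.Overlay([
    hv.VSpan(start, start + delta, label='Overutilized').options(color='red', alpha=0.05)
    for start, delta, overutilized in df[['len', 'overutilized']].itertuples()
    if overutilized
]).options(title='System-wide overutilized status')
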
Example #6
    def plot_latencies_cdf(self, task: TaskID, wakeup: bool = True,
                           preempt: bool = True, threshold_ms: float = 1):
        """
        Plot the latencies Cumulative Distribution Function of a task

        :param task: The task's name or PID
        :type task: int or str or tuple(int, str)

        :param wakeup: Whether to plot wakeup latencies
        :type wakeup: bool

        :param preempt: Whether to plot preemption latencies
        :type preempt: bool

        :param threshold_ms: The latency threshold to plot
        :type threshold_ms: int or float
        """

        df = self._get_latencies_df(task, wakeup, preempt)
        threshold_s = threshold_ms / 1e3
        cdf, above, below = self._get_cdf(df['latency'], threshold_s)

        return (
            hv.Curve(cdf, label='CDF') *
            self._plot_threshold(
                below,
                label=f"Latencies below {threshold_ms}ms",
            ) *
            hv.VSpan(
                0, threshold_s,
                label=f"{threshold_ms}ms threshold zone",
            ).options(
                alpha=0.5,
                color=self.LATENCY_THRESHOLD_ZONE_COLOR,
            )
        ).options(
            title=f'Latencies CDF of task "{task}"',
            xlabel="Latency (s)",
            ylabel="Latencies below the x value (%)",
        )
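
# Stand-in for the CDF + threshold-zone composition with synthetic latencies;
# LISA's _get_cdf helper is replaced by a plain numpy cumulative count and the
# zone colour is arbitrary.
import holoviews as hv
import numpy as np

hv.extension('bokeh')

latencies = np.array([0.0002, 0.0005, 0.0007, 0.0011, 0.0030])
threshold_s = 1 / 1e3  # 1 ms

xs = np.sort(latencies)
ys = np.arange(1, len(xs) + 1) / len(xs) * 100
cdf_fig = (
    hv.Curve((xs, ys), 'Latency (s)', 'Latencies below the x value (%)', label='CDF') *
    hv.VSpan(0, threshold_s, label='1ms threshold zone').options(alpha=0.5, color='red')
).options(title='Latencies CDF')
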
Example #7
    def plot_latencies_histogram(self, task: TaskID, wakeup: bool = True,
                                 preempt: bool = True, threshold_ms: float = 1,
                                 bins: int = 64):
        """
        Plot the latencies histogram of a task

        :param task: The task's name or PID
        :type task: int or str or tuple(int, str)

        :param wakeup: Whether to plot wakeup latencies
        :type wakeup: bool

        :param preempt: Whether to plot preemption latencies
        :type preempt: bool

        :param threshold_ms: The latency threshold to plot
        :type threshold_ms: int or float

        :param bins: Number of bins for the histogram
        :type bins: int
        """

        df = self._get_latencies_df(task, wakeup, preempt)
        threshold_s = threshold_ms / 1e3
        name = f'Latencies histogram of task {task}'
        return (
            hv.Histogram(
                np.histogram(df['latency'], bins=bins),
                label=name,
            ) *
            hv.VSpan(
                0, threshold_s,
                label=f"{threshold_ms}ms threshold zone",
            ).options(
                color=self.LATENCY_THRESHOLD_ZONE_COLOR,
                alpha=0.5,
            )
        ).options(
            xlabel='Latency (s)',
            title=name,
        )
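
# The same histogram-plus-threshold idiom with random synthetic latencies; the
# bin count and colour are arbitrary.
import holoviews as hv
import numpy as np

hv.extension('bokeh')

latencies = np.random.default_rng(0).exponential(scale=0.0008, size=1000)
hist_fig = (
    hv.Histogram(np.histogram(latencies, bins=64)) *
    hv.VSpan(0, 1e-3, label='1ms threshold zone').options(color='red', alpha=0.5)
).options(xlabel='Latency (s)', title='Latencies histogram')
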
Example #8
    def view(self):
        if self.lsd is None:
            return panel.pane.Markdown("No data selected.")
        try:
            if self.intercylinder_only:
                name = "ringmap_intercyl"
            else:
                name = "ringmap"
            container = self.data.load_file(self.revision, self.lsd, name)
        except DataError as err:
            return panel.pane.Markdown(
                f"Error: {str(err)}. Please report this problem."
            )

        # Index map for ra (x-axis)
        index_map_ra = container.index_map["ra"]
        axis_name_ra = "RA [degrees]"

        # Index map for sin(ZA)/sin(theta) (y-axis)
        index_map_el = container.index_map["el"]
        axis_name_el = "sin(\u03B8)"

        # Apply data selections
        sel_beam = np.where(container.index_map["beam"] == self.beam)[0]
        sel_freq = np.where(
            np.asarray([f[0] for f in container.index_map["freq"]]) == self.frequency
        )[0]
        if self.polarization == self.mean_pol_text:
            sel_pol = np.where(
                (container.index_map["pol"] == "XX")
                | (container.index_map["pol"] == "YY")
            )[0]
            rmap = np.squeeze(container.map[sel_beam, sel_pol, sel_freq])
            rmap = np.nanmean(rmap, axis=0)
        else:
            sel_pol = np.where(container.index_map["pol"] == self.polarization)[0]
            rmap = np.squeeze(container.map[sel_beam, sel_pol, sel_freq])

        if self.flag_mask:
            rmap = np.where(self._flags_mask(container.index_map["ra"]), np.nan, rmap)

        if self.weight_mask:
            try:
                rms = np.squeeze(container.rms[sel_pol, sel_freq])
            except IndexError:
                logger.error(
                    f"rms dataset of ringmap file for rev {self.revision} lsd "
                    f"{self.lsd} is missing [{sel_pol}, {sel_freq}] (polarization, "
                    f"frequency). rms has shape {container.rms.shape}"
                )
                self.weight_mask = False
            else:
                rmap = np.where(self._weights_mask(rms), np.nan, rmap)

        # Set flagged data to nan
        rmap = np.where(rmap == 0, np.nan, rmap)

        if self.crosstalk_removal:
            # The median of an all-NaN slice is NaN. We don't need a warning about that.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", r"All-NaN slice encountered")
                rmap -= np.nanmedian(rmap, axis=0)

        if self.template_subtraction:
            try:
                rm_stack = self.data.load_file_from_path(
                    self._stack_path, ccontainers.RingMap
                )
            except DataError as err:
                return panel.pane.Markdown(
                    f"Error: {str(err)}. Please report this problem."
                )

            # The stack file has all polarizations, so we can't reuse sel_pol
            if self.polarization == self.mean_pol_text:
                stack_sel_pol = np.where(
                    (rm_stack.index_map["pol"] == "XX")
                    | (rm_stack.index_map["pol"] == "YY")
                )[0]
            else:
                stack_sel_pol = np.where(
                    rm_stack.index_map["pol"] == self.polarization
                )[0]

            try:
                rm_stack = np.squeeze(rm_stack.map[sel_beam, stack_sel_pol, sel_freq])
            except IndexError as err:
                logger.error(
                    f"map dataset of ringmap stack file "
                    f"is missing [{sel_beam}, {stack_sel_pol}, {sel_freq}] (beam, polarization, "
                    f"frequency). map has shape {rm_stack.map.shape}:\n{err}"
                )
                self.template_subtraction = False
            else:
                if self.polarization == self.mean_pol_text:
                    rm_stack = np.nanmean(rm_stack, axis=0)

                # FIXME: this is a hack. Remove when the ringmap stack file is fixed.
                rmap -= rm_stack.reshape(rm_stack.shape[0], -1, 2).mean(axis=-1)

        if self.transpose:
            rmap = rmap.T
            index_x = index_map_ra
            index_y = index_map_el
            axis_names = [axis_name_ra, axis_name_el]
            xlim, ylim = self.ylim, self.xlim
        else:
            index_x = index_map_el
            index_y = index_map_ra
            axis_names = [axis_name_el, axis_name_ra]
            xlim, ylim = self.xlim, self.ylim

        img = hv.Image(
            (index_x, index_y, rmap),
            datatype=["image", "grid"],
            kdims=axis_names,
        ).opts(
            clim=self.colormap_range,
            logz=self.logarithmic_colorscale,
            cmap=process_cmap("inferno", provider="matplotlib"),
            colorbar=True,
            xlim=xlim,
            ylim=ylim,
        )

        if self.serverside_rendering is not None:
            # set colormap
            cmap_inferno = copy.copy(matplotlib_cm.get_cmap("inferno"))
            cmap_inferno.set_under("black")
            cmap_inferno.set_bad("lightgray")

            # Set z-axis normalization (other possible values are 'eq_hist', 'cbrt').
            if self.logarithmic_colorscale:
                normalization = "log"
            else:
                normalization = "linear"

            # datashade/rasterize the image
            img = self.serverside_rendering(
                img,
                cmap=cmap_inferno,
                precompute=True,
                x_range=xlim,
                y_range=ylim,
                normalization=normalization,
            )

        if self.mark_moon:
            # Put a ring around the location of the moon if it transits on this day
            eph = skyfield_wrapper.ephemeris

            # Start and end times of the CSD
            st = csd_to_unix(self.lsd.lsd)
            et = csd_to_unix(self.lsd.lsd + 1)

            moon_time, moon_dec = chime.transit_times(
                eph["moon"], st, et, return_dec=True
            )

            if len(moon_time):
                lunar_transit = unix_to_csd(moon_time[0])
                lunar_dec = moon_dec[0]
                lunar_ra = (lunar_transit % 1) * 360.0
                lunar_za = np.sin(np.radians(lunar_dec - 49.0))
                if self.transpose:
                    img *= hv.Ellipse(lunar_ra, lunar_za, (5.5, 0.15))
                else:
                    img *= hv.Ellipse(lunar_za, lunar_ra, (0.04, 21))

        if self.mark_day_time:
            # Calculate the sun rise/set times on this sidereal day

            # Start and end times of the CSD
            start_time = csd_to_unix(self.lsd.lsd)
            end_time = csd_to_unix(self.lsd.lsd + 1)

            times, rises = chime.rise_set_times(
                skyfield_wrapper.ephemeris["sun"],
                start_time,
                end_time,
                diameter=-10,
            )
            sun_rise = 0
            sun_set = 0
            for t, r in zip(times, rises):
                if r:
                    sun_rise = (unix_to_csd(t) % 1) * 360
                else:
                    sun_set = (unix_to_csd(t) % 1) * 360

            # Highlight the day time data
            opts = {
                "color": "grey",
                "alpha": 0.5,
                "line_width": 1,
                "line_color": "black",
                "line_dash": "dashed",
            }
            if self.transpose:
                if sun_rise < sun_set:
                    img *= hv.VSpan(sun_rise, sun_set).opts(**opts)
                else:
                    img *= hv.VSpan(self.ylim[0], sun_set).opts(**opts)
                    img *= hv.VSpan(sun_rise, self.ylim[1]).opts(**opts)

            else:
                if sun_rise < sun_set:
                    img *= hv.HSpan(sun_rise, sun_set).opts(**opts)
                else:
                    img *= hv.HSpan(self.ylim[0], sun_set).opts(**opts)
                    img *= hv.HSpan(sun_rise, self.ylim[1]).opts(**opts)

        img.opts(
            # Fix height, but make width responsive
            height=self.height,
            responsive=True,
            shared_axes=True,
            bgcolor="lightgray",
        )

        return panel.Row(img, width_policy="max")
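
# Reduced sketch of the day-time highlighting step above: a single span when
# the day does not wrap around the axis, two spans when it does. The RA values
# and axis limits are placeholders.
import holoviews as hv

hv.extension('bokeh')

span_opts = dict(color='grey', alpha=0.5, line_width=1, line_color='black',
                 line_dash='dashed')
sun_rise, sun_set = 300.0, 80.0  # degrees of RA; rise > set means the day wraps
ylim = (0.0, 360.0)

plot = hv.Curve([(0.0, 0.0), (1.0, 360.0)])
if sun_rise < sun_set:
    plot *= hv.HSpan(sun_rise, sun_set).opts(**span_opts)
else:
    plot *= hv.HSpan(ylim[0], sun_set).opts(**span_opts)
    plot *= hv.HSpan(sun_rise, ylim[1]).opts(**span_opts)
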
Example #9
    def load_raster(self):
        nr_units = get_number_of_units_for_patient(self.patient_id)
        neural_rec_time = get_neural_rectime_of_patient(
            self.patient_id, self.session_nr) / 1000
        data_array = []
        for i in range(nr_units):
            spikes = get_spiking_activity(self.patient_id, self.session_nr, i)
            # delete downloaded file from working directory
            if os.path.exists("neural_rec_time.npy"):
                os.remove("neural_rec_time.npy")
            data_array.append(list(np.array(spikes) - neural_rec_time[0]))

        ret = []
        for i in range(len(data_array)):
            # i is unit ID
            for j in data_array[i]:
                # j is spike time
                ret.append((j, i))

        scatter = hv.Scatter(ret)

        # Toggle decimate_plot to choose whether to use decimate.
        # Decimate draws at most a fixed number of elements at each zoom step, which makes the plot much faster.
        # Without decimate, every spike is drawn, but the plot becomes very slow.
        decimate_plot = True
        if decimate_plot:
            scatter = scatter.opts(
                color='blue',
                marker='dash',
                size=12,
                alpha=1,
                line_width=0.6,
                angle=90,
                xlabel='Time from beginning of recording in milliseconds',
                ylabel='Unit ID')
            # adjust the max_samples parameter if the plot is too slow or if you think it can handle even more spikes
            raster = decimate(scatter, max_samples=40000).opts(
                width=1500, height=800) * boxes
        else:
            scatter = scatter.opts(color='blue',
                                   marker='dash',
                                   size=12,
                                   alpha=1,
                                   line_width=0.2,
                                   angle=90)
            raster = scatter.opts(
                width=1500,
                height=800,
                xlabel='Time from beginning of recording in milliseconds',
                ylabel='Unit ID') * boxes

        # extracting necessary information from the database
        pauses = (MoviePauses()
                  & f"patient_id={self.patient_id}"
                  & f"session_nr={self.session_nr}")
        start_times_pauses = pauses.fetch("start_times")[0] - neural_rec_time[0]
        stop_times_pauses = pauses.fetch("stop_times")[0] - neural_rec_time[0]

        skips = (MovieSkips()
                 & f"patient_id={self.patient_id}"
                 & f"session_nr={self.session_nr}")
        start_times_skips = skips.fetch("start_times")[0] - neural_rec_time[0]
        stop_times_skips = skips.fetch("stop_times")[0] - neural_rec_time[0]

        # The user can select a region type to highlight on top of the raster plot.
        # Every option of the highlight variable from above has to be handled here.
        if self.highlight == "Pauses":
            ov = hv.NdOverlay({
                i: hv.VSpan(start_times_pauses[i],
                            stop_times_pauses[i]).opts(color='orange',
                                                       alpha=0.4)
                for i in range(len(start_times_pauses))
            })

        if self.highlight == "Skips":
            ov = hv.NdOverlay({
                i: hv.VSpan(start_times_skips[i],
                            stop_times_skips[i]).opts(color='green', alpha=0.4)
                for i in range(len(start_times_skips))
            })

        if self.highlight == 'None':
            ov = hv.NdOverlay({
                i: hv.VSpan(0, 0).opts(color='white', alpha=0)
                for i in range(1)
            })

        return raster * ov.opts(framewise=True)
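
# The NdOverlay-of-VSpans idiom used above, stripped of the database access;
# the start/stop times here are invented.
import holoviews as hv

hv.extension('bokeh')

starts = [10.0, 50.0]
stops = [20.0, 65.0]
highlight_overlay = hv.NdOverlay({
    i: hv.VSpan(starts[i], stops[i]).opts(color='orange', alpha=0.4)
    for i in range(len(starts))
})
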
Example #10

# ----------------------------------------------------------------------
# Widget option formatting.
# ----------------------------------------------------------------------

FORMAT_WIDGET_OPTIONS_TITLE_CASE = lambda x: x.replace('_', ' ').title()
FORMAT_WIDGET_OPTIONS_LOWERCASE = lambda x: x.replace('_', ' ').lower()


# ----------------------------------------------------------------------
# Spans to overlay on plots.
# ----------------------------------------------------------------------

# Periods / spans of financial trouble, recession, or crisis.
DEBT_CRISIS_2008 = hv.VSpan(datetime(2007,12,1), datetime(2009,6,1)).opts(line_width=1, color='lightgray')
DOT_COM_2001 = hv.VSpan(datetime(2001,3,1), datetime(2001,11,1)).opts(line_width=1, color='lightgray')
TROUBLE_1990 = hv.VSpan(datetime(1990,7,1), datetime(1991,3,1)).opts(line_width=1, color='lightgray')
TROUBLE_1982 = hv.VSpan(datetime(1981,11,1), datetime(1982,7,1)).opts(line_width=1, color='lightgray')
TROUBLE_1980 = hv.VSpan(datetime(1980,1,1), datetime(1980,7,1)).opts(line_width=1, color='lightgray')
TROUBLE_1974 = hv.VSpan(datetime(1973,11,1), datetime(1975,3,1)).opts(line_width=1, color='lightgray')
TROUBLE_1970 = hv.VSpan(datetime(1969,12,1), datetime(1970,11,1)).opts(line_width=1, color='lightgray')
TROUBLE_1960 = hv.VSpan(datetime(1960,4,1), datetime(1961,2,1)).opts(line_width=1, color='lightgray')
TROUBLE_1957 = hv.VSpan(datetime(1957,8,1), datetime(1958,4,1)).opts(line_width=1, color='lightgray')
TROUBLE_1953 = hv.VSpan(datetime(1953,7,1), datetime(1954,5,1)).opts(line_width=1, color='lightgray')
TROUBLE_1949 = hv.VSpan(datetime(1948,11,1), datetime(1949,10,1)).opts(line_width=1, color='lightgray')

# Combine the spans into a single recession overlay.
V_SPAN_ECO_RECESSION = (
    DEBT_CRISIS_2008 * DOT_COM_2001
    * TROUBLE_1990 * TROUBLE_1982 * TROUBLE_1980 * TROUBLE_1974 * TROUBLE_1970 * TROUBLE_1960
    * TROUBLE_1957 * TROUBLE_1953 * TROUBLE_1949
)
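
# Hypothetical usage of the combined overlay: multiply it onto any
# datetime-indexed curve, e.g. this made-up index series.
index_curve = hv.Curve(
    [(datetime(1950, 1, 1), 100), (datetime(2010, 1, 1), 1500)],
    'Date', 'Index',
)
recession_fig = index_curve * V_SPAN_ECO_RECESSION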