Code example #1
File: gradcam.py Project: nitinranjansharma/EVA5B2
def _plot_image(
    image_data: torch.Tensor,
    predicted: torch.Tensor,
    actual: torch.Tensor,
    class_labels: torch.Tensor,
    ax: axes.Axes,
):
    """
    Plots an image.

    Args:
        image_data (torch.Tensor): The image data.
        predicted (torch.Tensor): The predicted class
        actual (torch.Tensor): The actual class
        class_labels (torch.Tensor): The class labels
        ax (axes.Axes): The axes to plot on.
    """

    # turning off the axis lines in the plot
    ax.axis("off")

    # setting title
    ax.set_title("Predicted: %s\nActual: %s" % (
        int(predicted) if class_labels is None else class_labels[predicted],
        int(actual) if class_labels is None else class_labels[actual],
    ))

    # clipping input range
    if torch.is_floating_point(image_data):
        image_data = torch.clamp(image_data, 0, 1)
    else:
        image_data = torch.clamp(image_data, 0, 255)

    # plotting image
    ax.imshow(image_data.permute(1, 2, 0).cpu().numpy())
Code example #2
def draw_traj(traj: np.ndarray, ax: Axes) -> None:
    """
        Args:
        -   

        Returns:
        -   None
    """
    lineX = traj[:, 0]
    lineY = traj[:, 1]
    ax.scatter(
        lineX[:OBS_NUM_WAYPTS],
        lineY[:OBS_NUM_WAYPTS],
        20,
        marker=".",
        color="g",
        zorder=10,
    )
    ax.scatter(
        lineX[OBS_NUM_WAYPTS:],
        lineY[OBS_NUM_WAYPTS:],
        20,
        marker=".",
        color="r",
        zorder=10,
    )
    # ax.plot(lineX, lineY, "--", color='k', linewidth=1, zorder=0)
    ax.text(lineX[0], lineY[0], "s")
    ax.text(lineX[-1], lineY[-1], "e")
    ax.axis("equal")
Code example #3
 def _plot_image(self, image:Image, render_factor:int, axes:Axes=None, figsize=(20,20), display_render_factor:bool=False):
     if axes is None: 
         _,axes = plt.subplots(figsize=figsize)
     axes.imshow(np.asarray(image)/255)
     axes.axis('off')
     if render_factor is not None and display_render_factor:
         plt.text(10,10,'render_factor: ' + str(render_factor), color='white', backgroundcolor='black')
Code example #4
File: channelmap.py Project: tfiers/sharp
def draw_channelmap(ax: Axes,
                    active_channels: Optional[Sequence[int]] = None,
                    ms=5.5):
    ax.invert_yaxis()  # Origin: top-left
    ax.set_aspect("equal")
    ax.axis("off")
    # Close the path:
    probe_outline = tuple(config.probe_outline) + (config.probe_outline[0], )
    probe_x = [vertex[0] for vertex in probe_outline]
    probe_y = [vertex[1] for vertex in probe_outline]
    ax.plot(probe_x, probe_y, c="black", lw=1.5)
    num_channels = len(config.electrodes_x)
    all_channels = tuple(range(num_channels))
    if active_channels is None:
        active_channels = all_channels
    for channel in all_channels:
        if channel in active_channels:
            style = dict(color="black")
        else:
            style = dict(color="grey", markerfacecolor="none")
        ax.plot(
            config.electrodes_x[channel],
            config.electrodes_y[channel],
            marker="o",
            ms=ms,
            **style,
        )
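A hedged sketch of how this might be exercised. The function reads a module-level config object from the sharp project; the SimpleNamespace below is a hypothetical stand-in with a triangular probe outline and four electrodes, and the call only works if it is defined in the same module as the function above.

from types import SimpleNamespace
import matplotlib.pyplot as plt

# hypothetical stand-in for the project's config module
config = SimpleNamespace(
    probe_outline=[(0.0, 0.0), (4.0, 0.0), (2.0, 10.0)],
    electrodes_x=[1.0, 2.0, 3.0, 2.0],
    electrodes_y=[2.0, 2.0, 2.0, 4.0],
)

fig, ax = plt.subplots()
draw_channelmap(ax, active_channels=[0, 2])  # channels 1 and 3 appear as open grey circles
plt.show()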
Code example #5
def _plot_image(
    image_data: torch.Tensor,
    predicted: int,
    actual: int,
    ax: axes.Axes,
    class_labels: Tuple[str] = None,
):
    """
    Plots an image.

    Args:
        image_data (torch.Tensor): The image data.
        predicted (int): The class which the model predicted the image belongs to.
        actual (int): The actual class the image belongs to.
        ax (axes.Axes): The ``~matplotlib.axes.Axes`` to plot on.
        class_labels (Tuple[str], optional): The class labels to use. Defaults to None.
    """

    # turning off the axis lines in the plot
    ax.axis("off")

    # setting title
    ax.set_title("Predicted: %s\nActual: %s" % (
        int(predicted) if class_labels is None else class_labels[predicted],
        int(actual) if class_labels is None else class_labels[actual],
    ))

    # clipping input range
    if torch.is_floating_point(image_data):
        image_data = torch.clamp(image_data, 0, 1)
    else:
        image_data = torch.clamp(image_data, 0, 255)

    # plotting image
    ax.imshow(image_data.permute(1, 2, 0).cpu().numpy(), cmap="gray_r")
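A hedged usage sketch for the variant above, with a random CHW tensor and made-up class labels (hypothetical data):

import torch
import matplotlib.pyplot as plt

image = torch.rand(3, 32, 32)  # fake image in CHW layout, values already in [0, 1]
labels = ("plane", "car", "bird", "cat", "deer",
          "dog", "frog", "horse", "ship", "truck")

fig, ax = plt.subplots()
_plot_image(image, predicted=3, actual=5, ax=ax, class_labels=labels)
plt.show()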
Code example #6
File: best_track.py Project: WPringle/adcircpy
 def plot_track(
     self,
     axis: Axes = None,
     show: bool = False,
     color: str = 'k',
     coastline: bool = True,
     **kwargs,
 ):
     kwargs.update({'color': color})
     if axis is None:
         fig = pyplot.figure()
         axis = fig.add_subplot(111)
     for i in range(len(self.speed)):
         # when dealing with nautical degrees, U is sine and V is cosine.
         U = self.speed.iloc[i] * numpy.sin(
             numpy.deg2rad(self.direction.iloc[i]))
         V = self.speed.iloc[i] * numpy.cos(
             numpy.deg2rad(self.direction.iloc[i]))
         axis.quiver(self.longitude.iloc[i], self.latitude.iloc[i], U, V,
                     **kwargs)
         if i % 6 == 0:
             axis.annotate(
                 self.data['datetime'].iloc[i],
                 (self.longitude.iloc[i], self.latitude.iloc[i]),
             )
     if show:
         axis.axis('scaled')
     if bool(coastline) is True:
         plot_coastline(axis, show)
Code example #7
def create_result_table_and_legend_plot(axis: Axes, data_to_plot: dict,
                                        handles_labels: tuple):
    """Creates the plot-section containing a table with result scores and labels.

    Args:
        axis (Axes): Axes-instances to use for the subplot
        data_to_plot (dict): Dictionary containing ``"Detection_Score"`` and ``"AP"``
            and corresponding mean values
        handles_labels (tuple[List, List]): Tuple of matplotlib handles and corresponding labels
    """

    required_keys = ["Detection_Score", "mDetection_Score", "AP", "mAP"]
    assert all([key in data_to_plot.keys() for key in required_keys])

    # Results
    axis.axis("off")
    axis.text(0, 0.95, 'Results', fontdict={'weight': 'bold', 'size': 16})

    y_pos_row = 0.75
    # 2D AP results
    create_table_row(axis,
                     0.00,
                     y_pos_row,
                     data_to_plot,
                     title="2D AP",
                     key="AP",
                     subdict_key="auc")

    # Detection score results
    create_table_row(axis,
                     0.28,
                     y_pos_row,
                     data_to_plot,
                     title="Detection Score",
                     key="Detection_Score",
                     subdict_key=None)

    # Legend
    x_pos_legend = 0.6
    y_pos_legend = 0.75
    y_pos_dot_size = 0.0
    axis.text(x_pos_legend,
              0.95,
              'Legend',
              fontdict={
                  'weight': 'bold',
                  'size': 16
              })
    axis.legend(*handles_labels,
                frameon=True,
                loc="upper left",
                bbox_to_anchor=(x_pos_legend, y_pos_legend),
                ncol=2)

    # add data-point-marker size explanation
    dot_size_explanation = "The size of each data-point-marker indicates\n"
    dot_size_explanation += "the relative number of samples for that data-\n"
    dot_size_explanation += "point, with larger dots indicating more samples."
    axis.text(x_pos_legend, y_pos_dot_size, dot_size_explanation)
Code example #8
def draw_contour(subplot: Axes,
                 contour: np.ndarray,
                 color: str = 'black',
                 title: str = None):
    if title is not None:
        subplot.set_title(title)
    subplot.plot(*contour.transpose(), color=color)
    subplot.axis('equal')
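For example (hypothetical data), a closed unit-square contour given as an (N, 2) array, which contour.transpose() unpacks into x and y:

import numpy as np
import matplotlib.pyplot as plt

square = np.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]], dtype=float)

fig, ax = plt.subplots()
draw_contour(ax, square, color="tab:blue", title="unit square")
plt.show()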
Code example #9
 def _plot_image_from_ndarray(self,
                              image: ndarray,
                              axes: Axes = None,
                              figsize=(20, 20)):
     if axes is None:
         _, axes = plt.subplots(figsize=figsize)
     clipped_image = np.clip(image, 0, 1)
     axes.imshow(clipped_image)
     axes.axis('off')
Code example #10
    def render_sample_data(self,
                           sample_data_token: str,
                           with_anns: bool = True,
                           box_vis_level: BoxVisibility = 3,
                           axes_limit: float = 40,
                           ax: Axes = None) -> None:
        """
        Render sample data onto axis.
        :param sample_data_token: Sample_data token.
        :param with_anns: Whether to draw annotations.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        :param axes_limit: Axes limit for lidar data (measured in meters).
        :param ax: Axes onto which to render.
        """

        sd_record = self.nusc.get('sample_data', sample_data_token)
        sensor_modality = sd_record['sensor_modality']

        data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(
            sample_data_token, box_vis_level=box_vis_level)

        if sensor_modality == 'lidar':
            data = PointCloud.from_file(data_path)
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))
            points = view_points(data.points[:3, :],
                                 np.eye(4),
                                 normalize=False)
            ax.scatter(points[0, :], points[1, :], c=points[2, :], s=1)
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=[c, c, c])
            ax.set_xlim(-axes_limit, axes_limit)
            ax.set_ylim(-axes_limit, axes_limit)

        elif sensor_modality == 'camera':
            data = Image.open(data_path)
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 16))
            ax.imshow(data)
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax,
                               view=camera_intrinsic,
                               normalize=True,
                               colors=[c, c, c])
            ax.set_xlim(0, data.size[0])
            ax.set_ylim(data.size[1], 0)

        else:
            raise ValueError("RADAR rendering not implemented yet.")

        ax.axis('off')
        ax.set_title(sd_record['channel'])
        ax.set_aspect('equal')
Code example #11
File: app.py Project: MartinBCN/NeuralStyleTransfer
def imshow(tensor, title=None, ax: Axes = None):
    if ax is None:
        ax = plt.gca()  # fall back to the current axes
    image = tensor.cpu().clone()  # clone the tensor so we do not modify the original
    image = image.squeeze(0)      # remove the fake batch dimension
    image = unloader(image)
    ax.imshow(image)
    ax.axis('off')
    if title is not None:
        ax.set_title(title)
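A usage sketch, assuming unloader is the usual torchvision ToPILImage transform defined at module level in the original script:

import torch
import matplotlib.pyplot as plt
from torchvision import transforms

unloader = transforms.ToPILImage()  # assumed module-level helper from the original script

fig, ax = plt.subplots()
imshow(torch.rand(1, 3, 64, 64), title="styled image", ax=ax)
plt.show()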
Code example #12
def render_sample_labelonly(sample_token: dict,
                            with_anns: bool = True,
                            box_vis_level: BoxVisibility = BoxVisibility.ANY,
                            axes_limit: float = 50,
                            ax: Axes = None,
                            out_path: str = None) -> None:
    """
    Render sample data onto axis.
    :param sample_token: Sample record; its 'data' field is used to look up LIDAR_TOP.
    :param with_anns: Whether to draw annotations.
    :param box_vis_level: Required visibility for the boxes.
    :param axes_limit: Axes limit for lidar and radar (measured in meters).
    :param ax: Axes onto which to render.
    :param out_path: Optional path to save the rendered figure to disk.
    """

    # Get sensor modality.  should be 'LIDAR_TOP'
    lidar_sd_record = nusc.get('sample_data',
                               sample_token['data']['LIDAR_TOP'])

    # Get boxes in lidar frame.
    _, boxes, _ = nusc.get_sample_data(sample_token['data']['LIDAR_TOP'],
                                       box_vis_level=box_vis_level)
    # _, boxes, camera_intrinsic = nusc.get_sample_data(sample_token['data']['CAM_FRONT'], box_vis_level=box_vis_level)
    # Note that the boxes are transformed into the current sensor's coordinate frame.

    # Init axes.
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=(9, 9))

    # Show ego vehicle.
    ax.plot(0, 0, 'x', color='red')

    # Show boxes.
    if with_anns:
        for box in boxes:
            c = np.array(get_color(box.name)) / 255.0
            box.render(ax,
                       view=np.eye(4),
                       normalize=False,
                       colors=(c, c, c),
                       linewidth=1)
            # box.render(ax, view=camera_intrinsic, normalize=True, colors=(c, c, c))

    # Limit visible range.
    ax.set_xlim(-axes_limit, axes_limit)
    ax.set_ylim(-axes_limit, axes_limit)

    ax.axis('off')
    plt.tight_layout()
    ax.set_title('TOP')
    ax.set_aspect('equal')
    if out_path is not None:
        ax.figure.savefig(out_path)  # save the figure to file
        plt.close(ax.figure)  # close the figure
Code example #13
def plot_map(ax_in: Axes):
    """Plot map in the standard manner.

    Arguments:
        ax_in {Axes} -- Axes for the map!
    """
    img = mpimg.imread(environment['MAP_PATH'])
    ax_in.imshow(img, extent=[0, 1, 0, 1], zorder=0)
    ax_in.axis('off')
    ax_in.set_xlim(0, 1)
    ax_in.set_ylim(0, 1)
Code example #14
def add_table(ax: Axes, data: List[List[str]], index: List[str]=None, columns: List[str]=None, axis_off=True, **kwargs):
    table_kwargs = {
        "cellLoc": "center",
        "colLoc": "center",
        "rowLoc": "right",
        "loc": "center",
        # "bbox": ax.bbox.bounds
    }
    table_kwargs.update(kwargs)
    if axis_off:
        ax.axis('off')
    return ax.table(data, colLabels=columns, rowLabels=index, **table_kwargs)
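A small hedged example (hypothetical scores) that renders a 2x2 table on an otherwise empty axis:

import matplotlib.pyplot as plt

data = [["0.91", "0.88"], ["0.85", "0.87"]]

fig, ax = plt.subplots()
table = add_table(ax, data, index=["train", "test"], columns=["precision", "recall"])
table.scale(1, 2)  # matplotlib Table method; stretch row height for readability
plt.show()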
Code example #15
File: display.py Project: wym109/causalnex
def display_plot_mpl(
    viz: AGraph,
    prog: str = "neato",
    ax: Axes = None,
    pixel_size_in: float = 0.01,
) -> Tuple[Figure, Axes]:
    """
    Displays a pygraphviz object using matplotlib.

    Args:
        viz: pygraphviz object to render.

        prog: The graph layout. Available are:
        dot, neato, fdp, sfdp, twopi and circo.

        ax: Optional matplotlib axes to plot on.

        pixel_size_in: Scaling multiple for the plot.

    Returns:
        Tuple of (Figure, Axes). The Figure is None when an existing ax is passed in.

    Raises:
        ImportError: if matplotlib is not installed (optional dependency).
    """

    if Figure is Any:
        raise ImportError(
            "display_plot_mpl method requires matplotlib installed.")

    # bytes:
    s = viz.draw(format="png", prog=prog)
    # convert to numpy array
    array = plt.imread(io.BytesIO(s))
    x_dim, y_dim, _ = array.shape

    # handle passed axis
    if ax is not None:
        ax.imshow(array)
        ax.axis("off")
        return None, ax

    # handle new axis
    f, ax = plt.subplots(1,
                         1,
                         figsize=(y_dim * pixel_size_in,
                                  x_dim * pixel_size_in))
    ax.imshow(array)
    ax.axis("off")
    f.tight_layout(pad=0.0)
    return f, ax
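A usage sketch, assuming pygraphviz (and a Graphviz installation) is available:

import pygraphviz

g = pygraphviz.AGraph(directed=True)
g.add_edge("smoking", "cancer")
g.add_edge("smoking", "tar")

fig, ax = display_plot_mpl(g, prog="dot")
fig.savefig("graph.png")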
Code example #16
File: visualise.py Project: Gajdosik96/DP20
 def _plot_image_custom(self,
                        image: Image,
                        label: str,
                        axes: Axes = None,
                        figsize=(20, 20)):
     if axes is None:
         _, axes = plt.subplots(figsize=figsize)
     axes.imshow(image)
     axes.axis('off')
     plt.text(
         10,
         10,
         'Label: ' + label,
         color='white',
         backgroundcolor='black',
     )
Code example #17
 def _plot_image(
         self,
         image: Image,
         render_factor: int,
         axes: Axes = None,
         figsize=(20, 20),
         display_render_factor=False,
 ):
     if axes is None:
         _, axes = plt.subplots(figsize=figsize)
     axes.imshow(np.asarray(image) / 255)
     axes.axis("off")
     if render_factor is not None and display_render_factor:
         plt.text(
             10,
             10,
             "render_factor: " + str(render_factor),
             color="white",
             backgroundcolor="black",
         )
Code example #18
 def plot(self, ax: Axes):
     ax.axis("off")
     for lineno, indicators in enumerate(
              self._indicators[::-1]):  # lineno: row index counted from the bottom up
         for index_in_line, i in enumerate(indicators):
             x = index_in_line * INDICATOR_WIDTH
             y_value = lineno * (INDICATOR_VALUE_HEIGHT +
                                 INDICATOR_LABEL_HEIGHT)
             y_label = y_value + INDICATOR_LABEL_HEIGHT
             try:
                 value = i.formatter.format(self._values[i.key])
             except KeyError:
                 value = "nan"
             ax.text(x,
                     y_label,
                     i.label,
                     color=i.color,
                     fontsize=LABEL_FONT_SIZE),
             ax.text(x,
                     y_value,
                     value,
                     color=BLACK,
                     fontsize=i.value_font_size)
Code example #19
    def _plot_totals(self, total_barplot_ax: Axes,
                     orientation: Literal['top', 'right']):
        """
        Makes the bar plot for totals
        """
        params = self.plot_group_extra
        counts_df = params['counts_df']
        if self.categories_order is not None:
            counts_df = counts_df.loc[self.categories_order]
        if params['color'] is None:
            if f'{self.groupby}_colors' in self.adata.uns:
                color = self.adata.uns[f'{self.groupby}_colors']
            else:
                color = 'salmon'
        else:
            color = params['color']

        if orientation == 'top':
            counts_df.plot(
                kind="bar",
                color=color,
                position=0.5,
                ax=total_barplot_ax,
                edgecolor="black",
                width=0.65,
            )
            # add numbers to the top of the bars
            max_y = max([p.get_height() for p in total_barplot_ax.patches])

            for p in total_barplot_ax.patches:
                p.set_x(p.get_x() + 0.5)
                if p.get_height() >= 1000:
                    display_number = f'{np.round(p.get_height()/1000, decimals=1)}k'
                else:
                    display_number = np.round(p.get_height(), decimals=1)
                total_barplot_ax.annotate(
                    display_number,
                    (p.get_x() + p.get_width() / 2.0,
                     (p.get_height() + max_y * 0.05)),
                    ha="center",
                    va="top",
                    xytext=(0, 10),
                    fontsize="x-small",
                    textcoords="offset points",
                )
            # for k in total_barplot_ax.spines.keys():
            #     total_barplot_ax.spines[k].set_visible(False)
            total_barplot_ax.set_ylim(0, max_y * 1.4)

        elif orientation == 'right':
            counts_df.plot(
                kind="barh",
                color=color,
                position=-0.3,
                ax=total_barplot_ax,
                edgecolor="black",
                width=0.65,
            )

            # add numbers to the right of the bars
            max_x = max([p.get_width() for p in total_barplot_ax.patches])
            for p in total_barplot_ax.patches:
                if p.get_width() >= 1000:
                    display_number = f'{np.round(p.get_width()/1000, decimals=1)}k'
                else:
                    display_number = np.round(p.get_width(), decimals=1)
                total_barplot_ax.annotate(
                    display_number,
                    ((p.get_width()), p.get_y() + p.get_height()),
                    ha="center",
                    va="top",
                    xytext=(10, 10),
                    fontsize="x-small",
                    textcoords="offset points",
                )
            total_barplot_ax.set_xlim(0, max_x * 1.4)

        total_barplot_ax.grid(False)
        total_barplot_ax.axis("off")
Code example #20
from matplotlib.figure import Figure
from matplotlib.axes import Axes
from matplotlib.lines import Line2D
from matplotlib.backends.backend_agg import FigureCanvasAgg

import dna_walk_utils as walk
import random


size_in_inches = [8, 8]  # width, height
fig = Figure(figsize=size_in_inches)
ax = Axes(fig, [.1, .1, .8, .8])
fig.add_axes(ax)

bases = walk.TEST_SEQUENCES["Acinetobacter_calcoaceticus"]
bases2 = walk.TEST_SEQUENCES["A.israelii"]

for samplename in walk.TEST_SEQUENCES.keys():
    bases = walk.TEST_SEQUENCES[samplename]
    print("Walking", samplename)
    clr = random.choice("bgrcmyk")

    def on_window(from_loc, to_loc):
        # draw one window of the walk as a line segment in this sample's colour
        ax.add_line(Line2D([from_loc.x, to_loc.x], [from_loc.y, to_loc.y], color=clr))

    walk.do_line_walk(bases, 4, on_window)

ax.relim()
ax.axis('tight')
ax.set_title('DNA Walk of "All Seqs.fasta"\n(16S Example Set)')

canvas = FigureCanvasAgg(fig)
canvas.print_figure("dna_walk.png")
Code example #21
    def draw_pentagon(self, axes: Axes) -> None:
        points = self.get_end_points()
        points = np.insert(points, 0, [0, 0], axis=0)

        min_x, min_y = np.amin(points, axis=0)
        max_x, max_y = np.amax(points, axis=0)

        axes.axis([min_x - .5, max_x + .5, max_y + .5, min_y - .5])
        axes.set_aspect('1.7')
        axes.axis('off')

        path = mpath.Path(points, closed=True)
        patch = mpatches.PathPatch(path, facecolor='white', lw=1)
        axes.add_patch(patch)

        test_points = [(x, y + .5) for y in range(min_y - 2, max_y + 2)
                       for x in range(min_x - 1, max_x + 2)]
        test_results = path.contains_points(test_points)
        triangle_points = [
            (x, math.floor(y))
            for ((x, y), result) in zip(test_points, test_results) if result
        ]
        assert len(triangle_points) == self.get_size()
        for i, (x, y) in enumerate(triangle_points, start=1):
            fontsize = 6 if i >= 100 else 8
            if (x + y) % 2 == 1:
                axes.plot([x - 1, x + 1, x, x - 1], [y, y, y + 1, y],
                          lw=0.5,
                          color='black')
                axes.text(x,
                          y + .3,
                          str(i),
                          fontsize=fontsize,
                          fontweight='bold',
                          va='center',
                          ha='center')
            else:
                # We only need to draw half the triangles.  The three sides of every triangle are either a triangle
                # pointing the other way, or the border.
                # axes.plot([x - 1, x + 1, x, x - 1], [y + 1, y + 1, y, y + 1], lw=0.5, color='black')
                axes.text(x,
                          y + .7,
                          str(i),
                          fontsize=fontsize,
                          fontweight='bold',
                          va='center',
                          ha='center')

        all_points = self.get_all_points()
        x_points, y_points = list(zip(*all_points))
        axes.plot(x_points, y_points,
                  "bh")  # black hexagonal.  A hexagon seems appropriate!

        for (x1, y1), (x2, y2), length in zip(points, points[1:], self.values):
            mid_x, mid_y = (x1 + x2) / 2, (y1 + y2) / 2
            slope = (y2 - y1) / (x2 - x1)
            magnitude = math.sqrt(slope * slope + 1)
            offset_x, offset_y = .4 * slope / magnitude, .4 * -1.0 / magnitude

            if path.contains_point((mid_x + offset_x, mid_y + offset_y)):
                offset_x, offset_y = -offset_x, -offset_y
            axes.text(mid_x + offset_x,
                      mid_y + offset_y,
                      str(length),
                      color='red',
                      size='large',
                      va='center',
                      ha='center')
Code example #22
    def _draw_grid_world(self, axis: Axes) -> None:

        draw_kwargs: Dict[str, Any] = dict(linestyle="-", color="k", alpha=0.5)

        col: int
        for col in range(self.width):
            axis.plot((col - 0.5) * np.ones(2), [-0.5, self.height - 0.5],
                      **draw_kwargs)

        axis.plot((self.width - 0.5) * np.ones(2), [-0.5, self.height - 0.5],
                  **draw_kwargs)

        row: int
        for row in range(self.height):
            axis.plot([-0.5, self.width - 0.5], (row - 0.5) * np.ones(2),
                      **draw_kwargs)

        axis.plot([-0.5, self.width - 0.5], (self.height - 0.5) * np.ones(2),
                  **draw_kwargs)

        axis.add_artist(
            plt.Circle(
                self.get_start_state(),
                radius=GridWorld.ARROW_LENGTH * 0.5 * 1.1,
                color="b",
                fill=False,
            ))

        for terminal_state in self.get_terminal_states():
            axis.add_artist(
                plt.Circle(
                    terminal_state,
                    radius=GridWorld.ARROW_LENGTH * 0.5 * 1.1,
                    color="k",
                    fill=True,
                ))

        xlim: Tuple[int, int] = [-1.0, self.width]
        ylim: Tuple[int, int] = [-1.0, self.height]
        # TODO adding typing from here

        if self.upward_wind_list:
            for col in range(self.width):
                axis.text(
                    col,
                    -1.0,
                    f"{self.upward_wind_list[col]:.1f}",
                    ha="center",
                    va="top",
                )
                ylim[0] = -1.5

        if self.rightward_wind_list:
            for row in range(self.height):
                axis.text(-1.0, row, f"{self.rightward_wind_list[row]:.1f}")
                xlim[0] = -1.5

        axis.set_xlim(xlim)
        axis.set_ylim(ylim)

        axis.axis("equal")
        axis.axis("off")
Code example #23
File: test_polygons.py Project: sungheeyun/GeoMagic
 def _finish_fig(figure: Figure, axis: Axes) -> None:
     axis.axis("off")
     axis.axis("equal")
     figure.show()
Code example #24
def set_axis_text(ax: Axes, text: str, size: int = 36):
    ax.axis('off')
    ax.text(0.5, -0.15, text, size=size, ha="center", transform=ax.transAxes)
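For instance (hypothetical), using it to caption an image panel with a text-only axis underneath:

import matplotlib.pyplot as plt

fig, (ax_img, ax_caption) = plt.subplots(2, 1, figsize=(4, 5))
ax_img.imshow([[0, 1], [1, 0]], cmap="gray")
set_axis_text(ax_caption, "Checkerboard", size=18)
plt.show()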
Code example #25
def show_one_decomposed_scatter(raw_chunks_x,
                                raw_chunks_y,
                                *,
                                ax: Axes = None,
                                xlabel=None,
                                ylabel=None,
                                title=None,
                                color_bias: int = None,
                                letter_map=None):
    if letter_map is not None:
        ax.text(0,
                1,
                chr(letter_map + ord('a')),
                horizontalalignment='left',
                verticalalignment='top',
                fontweight='bold',
                transform=ax.transAxes)

    # this is for spotlight
    assert len(raw_chunks_x) == len(raw_chunks_y)
    color_list = plt.get_cmap('Set2').colors
    chunk_x_all, chunk_y_all = [], []
    for idx, (raw_chunk_this_x,
              raw_chunk_this_y) in enumerate(zip(raw_chunks_x, raw_chunks_y)):
        assert raw_chunk_this_x.shape == raw_chunk_this_y.shape == (
            raw_chunk_this_y.size, )

        # only sample up to 50 points.
        if raw_chunk_this_x.size > 50:
            rng_state_this = np.random.RandomState(seed=0)
            index_rand = rng_state_this.choice(raw_chunk_this_x.size,
                                               50,
                                               replace=False)
        else:
            index_rand = slice(None)

        ax.scatter(raw_chunk_this_x[index_rand],
                   raw_chunk_this_y[index_rand],
                   color=color_list[color_bias + idx],
                   alpha=0.5,
                   s=12)
        # show linear regression line.
        fit_this = np.polyfit(raw_chunk_this_x, raw_chunk_this_y, deg=1)

        #         start_end_vector_this = np.array([raw_chunk_this_x.min(),raw_chunk_this_x.max()])
        start_end_vector_this = np.array([0.5, 1])
        ax.plot(start_end_vector_this,
                (fit_this[0] *
                 (start_end_vector_this - 0.5) * 2 + fit_this[1]) / 2,
                color=color_list[color_bias + idx],
                linewidth=1)

        #         ax.text(0.95,0.6-idx*0.1, '{:.3f}'.format(fit_this[0]),
        #                 horizontalalignment='right',
        #                 verticalalignment='top', fontsize='medium',
        #                 color=color_list[color_bias+idx])

        chunk_x_all.append(raw_chunk_this_x.copy())
        chunk_y_all.append(raw_chunk_this_y.copy())

        # add text.

    chunk_x_all = np.concatenate(chunk_x_all)
    chunk_y_all = np.concatenate(chunk_y_all)
    fit_this = np.polyfit(chunk_x_all, chunk_y_all, deg=1)
    #     start_end_vector_this = np.array([chunk_x_all.min(),chunk_x_all.max()])
    start_end_vector_this = np.array([0.5, 1])
    # linear transform things values in [0,1] x [0,1] to [0.5,1] x [0,0.5]
    ax.plot(start_end_vector_this,
            (fit_this[0] *
             (start_end_vector_this - 0.5) * 2 + fit_this[1]) / 2,
            color='black',
            linewidth=1,
            linestyle='--')

    ax.plot([0, 1], [0, 1], linestyle='--')
    ax.set_xlim(-0.15, 1.1)
    ax.set_ylim(-0.15, 1.1)
    ax.axis('off')

    if title is not None:
        ax.text(0.5,
                0.975,
                title,
                horizontalalignment='center',
                verticalalignment='top',
                fontsize='medium',
                transform=ax.transAxes)
    if xlabel is not None:
        ax.text(
            0.5,
            0,
            xlabel,
            horizontalalignment='center',
            verticalalignment='top',
            fontsize='medium',
        )
    if ylabel is not None:
        ax.text(
            0,
            0.5,
            ylabel,
            rotation='vertical',
            horizontalalignment='right',
            verticalalignment='center',
            fontsize='medium',
        )

    # add pearson stuff
    ax.text(0,
            1,
            '{:.4f}'.format(chunk_y_all.mean()),
            horizontalalignment='left',
            verticalalignment='top',
            fontsize='medium')
    ax.text(1,
            0,
            '{:.4f}'.format(chunk_x_all.mean()),
            horizontalalignment='right',
            verticalalignment='bottom',
            fontsize='medium')
    corr_this = pearsonr(chunk_x_all, chunk_y_all)[0]
    r_text = 'n={}\nr={:.2f}'.format(chunk_x_all.size, corr_this)
    ax.text(0, 0.7, r_text, fontsize='medium', horizontalalignment='left')
Code example #26
    def _plot_var_groups_brackets(
        gene_groups_ax: Axes,
        group_positions: Iterable[Tuple[int, int]],
        group_labels: Sequence[str],
        left_adjustment: float = -0.3,
        right_adjustment: float = 0.3,
        rotation: Optional[float] = None,
        orientation: Literal['top', 'right'] = 'top',
    ):
        """\
        Draws brackets that represent groups of genes on the given axis.
        For best results, this axis is located on top of an image whose
        x axis contains gene names.

        The gene_groups_ax should share the x axis with the main ax.

        Eg: gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=dot_ax)

        Parameters
        ----------
        gene_groups_ax
            In this axis the gene marks are drawn
        group_positions
            Each item in the list, should contain the start and end position that the
            bracket should cover.
            Eg. [(0, 4), (5, 8)] means that there are two brackets, one for the var_names (eg genes)
            in positions 0-4 and other for positions 5-8
        group_labels
            List of group labels
        left_adjustment
            adjustment to plot the bracket start slightly before or after the first gene position.
            If the value is negative the start is moved before.
        right_adjustment
            adjustment to plot the bracket end slightly before or after the last gene position
            If the value is negative the start is moved before.
        rotation
            rotation degrees for the labels. If not given, small labels (<4 characters) are not
            rotated, otherwise, they are rotated 90 degrees
        orientation
            location of the brackets. Either `top` or `right`
        Returns
        -------
        None
        """
        import matplotlib.patches as patches
        from matplotlib.path import Path

        # get the 'brackets' coordinates as lists of start and end positions

        left = [x[0] + left_adjustment for x in group_positions]
        right = [x[1] + right_adjustment for x in group_positions]

        # verts and codes are used by PathPatch to make the brackets
        verts = []
        codes = []
        if orientation == 'top':
            # rotate labels if any of them is longer than 4 characters
            if rotation is None and group_labels:
                if max([len(x) for x in group_labels]) > 4:
                    rotation = 90
                else:
                    rotation = 0
            for idx, (left_coor, right_coor) in enumerate(zip(left, right)):
                verts.append((left_coor, 0))  # lower-left
                verts.append((left_coor, 0.6))  # upper-left
                verts.append((right_coor, 0.6))  # upper-right
                verts.append((right_coor, 0))  # lower-right

                codes.append(Path.MOVETO)
                codes.append(Path.LINETO)
                codes.append(Path.LINETO)
                codes.append(Path.LINETO)

                group_x_center = left[idx] + float(right[idx] - left[idx]) / 2
                gene_groups_ax.text(
                    group_x_center,
                    1.1,
                    group_labels[idx],
                    ha='center',
                    va='bottom',
                    rotation=rotation,
                )
        else:
            top = left
            bottom = right
            for idx, (top_coor, bottom_coor) in enumerate(zip(top, bottom)):
                verts.append((0, top_coor))  # upper-left
                verts.append((0.4, top_coor))  # upper-right
                verts.append((0.4, bottom_coor))  # lower-right
                verts.append((0, bottom_coor))  # lower-left

                codes.append(Path.MOVETO)
                codes.append(Path.LINETO)
                codes.append(Path.LINETO)
                codes.append(Path.LINETO)

                diff = bottom[idx] - top[idx]
                group_y_center = top[idx] + float(diff) / 2
                if diff * 2 < len(group_labels[idx]):
                    # cut label to fit available space
                    group_labels[idx] = group_labels[idx][:int(diff * 2)] + "."
                gene_groups_ax.text(
                    1.1,
                    group_y_center,
                    group_labels[idx],
                    ha='right',
                    va='center',
                    rotation=270,
                    fontsize='small',
                )

        path = Path(verts, codes)

        patch = patches.PathPatch(path, facecolor='none', lw=1.5)

        gene_groups_ax.add_patch(patch)
        gene_groups_ax.grid(False)
        gene_groups_ax.axis('off')
        # remove y ticks
        gene_groups_ax.tick_params(axis='y', left=False, labelleft=False)
        # remove x ticks and labels
        gene_groups_ax.tick_params(axis='x',
                                   bottom=False,
                                   labelbottom=False,
                                   labeltop=False)
Code example #27
def render_sample_data(sample_data_token: str,
                       with_anns: bool = True,
                       box_vis_level: BoxVisibility = BoxVisibility.ANY,
                       axes_limit: float = 40,
                       ax: Axes = None,
                       num_sweeps: int = 1,
                       out_path: str = None,
                       underlay_map: bool = False,
                       detections: list = [],
                       categories: list = [],
                       valLyft: LyftDataset = None):
    """Render sample data onto axis.

    Args:
        sample_data_token: Sample_data token.
        with_anns: Whether to draw annotations.
        box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        axes_limit: Axes limit for lidar and radar (measured in meters).
        ax: Axes onto which to render.
        num_sweeps: Number of sweeps for lidar and radar.
        out_path: Optional path to save the rendered figure to disk.
        underlay_map: When set to true, LIDAR data is plotted onto the map. This can be slow.
        detections: Predicted boxes, each as a flat array of center (3), size (3) and orientation quaternion (4).
        categories: Class name for each entry in detections.
        valLyft: LyftDataset instance used to look up sample_data records.

    """

    # Get sensor modality.
    sd_record = valLyft.get("sample_data", sample_data_token)
    sensor_modality = sd_record["sensor_modality"]

    if sensor_modality == "camera":
        # Load boxes and image.
        data_path, _, camera_intrinsic = valLyft.get_sample_data(
            sample_data_token, box_vis_level=box_vis_level)
        data = Image.open(data_path)

        # Init axes.
        if ax is None:
            _, ax = plt.subplots(1, 1, figsize=(9, 16))

        # Show image.
        ax.imshow(data)
        #categories = ['car', 'pedestrian', 'truck', 'bicycle', 'bus', 'other_vehicle', 'motorcycle', 'emergency_vehicle', 'animal']
        # Show boxes.
        if with_anns:
            boxes = []
            for c1, detection in enumerate(detections):
                #print(categories)
                cat = categories[c1]
                #print(cat)
                #import pdb; pdb.set_trace()
                box = Box(detection[:3],
                          detection[3:6],
                          Quaternion(np.array(detection[6:10])),
                          name=cat)
                boxes.append(box)
            for box in boxes:
                c = np.array(get_color(box.name)) / 255.0
                box.render(ax,
                           view=camera_intrinsic,
                           normalize=True,
                           colors=(c, c, c))

        # Limit visible range.
        ax.set_xlim(0, data.size[0])
        ax.set_ylim(data.size[1], 0)

    ax.axis("off")
    ax.set_title(sd_record["channel"])
    ax.set_aspect("equal")

    if out_path is not None:
        num = len([name for name in os.listdir(out_path)])
        out_path = out_path + str(num).zfill(
            5) + "_" + sample_data_token + ".png"
        plt.savefig(out_path)
        plt.close("all")
        return out_path
Code example #28
    def render_sample_data(self,
                           sample_data_token: str,
                           with_anns: bool = True,
                           box_vis_level: BoxVisibility = BoxVisibility.ANY,
                           axes_limit: float = 40,
                           ax: Axes = None,
                           nsweeps: int = 1) -> None:
        """
        Render sample data onto axis.
        :param sample_data_token: Sample_data token.
        :param with_anns: Whether to draw annotations.
        :param box_vis_level: If sample_data is an image, this sets required visibility for boxes.
        :param axes_limit: Axes limit for lidar and radar (measured in meters).
        :param ax: Axes onto which to render.
        :param nsweeps: Number of sweeps for lidar and radar.
        """

        # Get sensor modality.
        sd_record = self.nusc.get('sample_data', sample_data_token)
        sensor_modality = sd_record['sensor_modality']

        if sensor_modality == 'lidar':
            # Get boxes in lidar frame.
            _, boxes, _ = self.nusc.get_sample_data(sample_data_token, box_vis_level=box_vis_level)

            # Get aggregated point cloud in lidar frame.
            sample_rec = self.nusc.get('sample', sd_record['sample_token'])
            chan = sd_record['channel']
            ref_chan = 'LIDAR_TOP'
            pc, times = LidarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan, nsweeps=nsweeps)

            # Init axes.
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))

            # Show point cloud.
            points = view_points(pc.points[:3, :], np.eye(4), normalize=False)
            dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0))
            colors = np.minimum(1, dists/axes_limit/np.sqrt(2))
            ax.scatter(points[0, :], points[1, :], c=colors, s=0.2)

            # Show ego vehicle.
            ax.plot(0, 0, 'x', color='black')

            # Show boxes.
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=(c, c, c))

            # Limit visible range.
            ax.set_xlim(-axes_limit, axes_limit)
            ax.set_ylim(-axes_limit, axes_limit)

        elif sensor_modality == 'radar':
            # Get boxes in lidar frame.
            sample_rec = self.nusc.get('sample', sd_record['sample_token'])
            lidar_token = sample_rec['data']['LIDAR_TOP']
            _, boxes, _ = self.nusc.get_sample_data(lidar_token, box_vis_level=box_vis_level)

            # Get aggregated point cloud in lidar frame.
            # The point cloud is transformed to the lidar frame for visualization purposes.
            chan = sd_record['channel']
            ref_chan = 'LIDAR_TOP'
            pc, times = RadarPointCloud.from_file_multisweep(self.nusc, sample_rec, chan, ref_chan, nsweeps=nsweeps)

            # Transform radar velocities (x is front, y is left), as these are not transformed when loading the point
            # cloud.
            radar_cs_record = self.nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
            lidar_sd_record = self.nusc.get('sample_data', lidar_token)
            lidar_cs_record = self.nusc.get('calibrated_sensor', lidar_sd_record['calibrated_sensor_token'])
            velocities = pc.points[8:10, :]  # Compensated velocity
            velocities = np.vstack((velocities, np.zeros(pc.points.shape[1])))
            velocities = np.dot(Quaternion(radar_cs_record['rotation']).rotation_matrix, velocities)
            velocities = np.dot(Quaternion(lidar_cs_record['rotation']).rotation_matrix.T, velocities)
            velocities[2, :] = np.zeros(pc.points.shape[1])

            # Init axes.
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))

            # Show point cloud.
            points = view_points(pc.points[:3, :], np.eye(4), normalize=False)
            dists = np.sqrt(np.sum(pc.points[:2, :] ** 2, axis=0))
            colors = np.minimum(1, dists / axes_limit / np.sqrt(2))
            sc = ax.scatter(points[0, :], points[1, :], c=colors, s=3)

            # Show velocities.
            points_vel = view_points(pc.points[:3, :] + velocities, np.eye(4), normalize=False)
            max_delta = 10
            deltas_vel = points_vel - points
            deltas_vel = 3 * deltas_vel  # Arbitrary scaling
            deltas_vel = np.clip(deltas_vel, -max_delta, max_delta)  # Arbitrary clipping
            colors_rgba = sc.to_rgba(colors)
            for i in range(points.shape[1]):
                ax.arrow(points[0, i], points[1, i], deltas_vel[0, i], deltas_vel[1, i], color=colors_rgba[i])

            # Show ego vehicle.
            ax.plot(0, 0, 'x', color='black')

            # Show boxes.
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=np.eye(4), colors=(c, c, c))

            # Limit visible range.
            ax.set_xlim(-axes_limit, axes_limit)
            ax.set_ylim(-axes_limit, axes_limit)

        elif sensor_modality == 'camera':
            # Load boxes and image.
            data_path, boxes, camera_intrinsic = self.nusc.get_sample_data(sample_data_token,
                                                                           box_vis_level=box_vis_level)
            data = Image.open(data_path)

            # Init axes.
            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 16))

            # Show image.
            ax.imshow(data)

            # Show boxes.
            if with_anns:
                for box in boxes:
                    c = np.array(self.get_color(box.name)) / 255.0
                    box.render(ax, view=camera_intrinsic, normalize=True, colors=(c, c, c))

            # Limit visible range.
            ax.set_xlim(0, data.size[0])
            ax.set_ylim(data.size[1], 0)

        else:
            raise ValueError("Error: Unknown sensor modality!")

        ax.axis('off')
        ax.set_title(sd_record['channel'])
        ax.set_aspect('equal')
Code example #29
def plot_pianoroll(
    ax: Axes,
    pianoroll: ndarray,
    is_drum: bool = False,
    resolution: Optional[int] = None,
    downbeats: Optional[Sequence[int]] = None,
    preset: str = "full",
    cmap: str = "Blues",
    xtick: str = "auto",
    ytick: str = "octave",
    xticklabel: bool = True,
    yticklabel: str = "auto",
    tick_loc: Sequence[str] = ("bottom", "left"),
    tick_direction: str = "in",
    label: str = "both",
    grid_axis: str = "both",
    grid_linestyle: str = ":",
    grid_linewidth: float = 0.5,
    **kwargs,
):
    """
    Plot a piano roll.

    Parameters
    ----------
    ax : :class:`matplotlib.axes.Axes`
        Axes to plot the piano roll on.
    pianoroll : ndarray, shape=(?, 128), (?, 128, 3) or (?, 128, 4)
        Piano roll to plot. For a 3D piano-roll array, the last axis can
        be either RGB or RGBA.
    is_drum : bool
        Whether it is a percussion track. Defaults to False.
    resolution : int
        Time steps per quarter note. Required if `xtick` is 'beat'.
    downbeats : list
        Boolean array that indicates whether the time step contains a
        downbeat (i.e., the first time step of a bar).
    preset : {'full', 'frame', 'plain'}
        Preset theme. For 'full' preset, ticks, grid and labels are on.
        For 'frame' preset, ticks and grid are both off. For 'plain'
        preset, the x- and y-axis are both off. Defaults to 'full'.
    cmap : str or :class:`matplotlib.colors.Colormap`
        Colormap. Will be passed to :func:`matplotlib.pyplot.imshow`.
        Only effective when `pianoroll` is 2D. Defaults to 'Blues'.
    xtick : {'auto', 'beat', 'step', 'off'}
        Tick format for the x-axis. For 'auto' mode, set to 'beat' if
        `resolution` is given, otherwise set to 'step'. Defaults to
        'auto'.
    ytick : {'octave', 'pitch', 'off'}
        Tick format for the y-axis. Defaults to 'octave'.
    xticklabel : bool
        Whether to add tick labels along the x-axis.
    yticklabel : {'auto', 'name', 'number', 'off'}
        Tick label format for the y-axis. For 'name' mode, use pitch
        name as tick labels. For 'number' mode, use pitch number. For
        'auto' mode, set to 'name' if `ytick` is 'octave' and 'number'
        if `ytick` is 'pitch'. Defaults to 'auto'.
    tick_loc : sequence of {'bottom', 'top', 'left', 'right'}
        Tick locations. Defaults to `('bottom', 'left')`.
    tick_direction : {'in', 'out', 'inout'}
        Tick direction. Defaults to 'in'.
    label : {'x', 'y', 'both', 'off'}
        Whether to add labels to x- and y-axes. Defaults to 'both'.
    grid_axis : {'x', 'y', 'both', 'off'}
        Whether to add grids to the x- and y-axes. Defaults to 'both'.
    grid_linestyle : str
        Grid line style. Will be passed to
        :meth:`matplotlib.axes.Axes.grid`.
    grid_linewidth : float
        Grid line width. Will be passed to
        :meth:`matplotlib.axes.Axes.grid`.
    **kwargs
        Keyword arguments to be passed to
        :meth:`matplotlib.axes.Axes.imshow`.

    """
    # Plot the piano roll
    if pianoroll.ndim == 2:
        transposed = pianoroll.T
    elif pianoroll.ndim == 3:
        transposed = pianoroll.transpose(1, 0, 2)
    else:
        raise ValueError("`pianoroll` must be a 2D or 3D numpy array")

    img = ax.imshow(
        transposed,
        cmap=cmap,
        aspect="auto",
        vmin=0,
        vmax=1 if pianoroll.dtype == np.bool_ else 127,
        origin="lower",
        interpolation="none",
        **kwargs,
    )

    # Format ticks and labels
    if xtick == "auto":
        xtick = "beat" if resolution is not None else "step"
    elif xtick not in ("beat", "step", "off"):
        raise ValueError(
            "`xtick` must be one of 'auto', 'beat', 'step' or 'off', not "
            f"{xtick}.")
    if yticklabel == "auto":
        yticklabel = "name" if ytick == "octave" else "number"
    elif yticklabel not in ("name", "number", "off"):
        raise ValueError(
            "`yticklabel` must be one of 'auto', 'name', 'number' or 'off', "
            f"{yticklabel}.")

    if preset == "full":
        ax.tick_params(
            direction=tick_direction,
            bottom=("bottom" in tick_loc),
            top=("top" in tick_loc),
            left=("left" in tick_loc),
            right=("right" in tick_loc),
            labelbottom=xticklabel,
            labelleft=(yticklabel != "off"),
            labeltop=False,
            labelright=False,
        )
    elif preset == "frame":
        ax.tick_params(
            direction=tick_direction,
            bottom=False,
            top=False,
            left=False,
            right=False,
            labelbottom=False,
            labeltop=False,
            labelleft=False,
            labelright=False,
        )
    elif preset == "plain":
        ax.axis("off")
    else:
        raise ValueError(
            f"`preset` must be one of 'full', 'frame' or 'plain', not {preset}"
        )

    # Format x-axis
    if xtick == "beat" and preset != "frame":
        if resolution is None:
            raise ValueError(
                "`resolution` must not be None when `xtick` is 'beat'.")
        n_beats = pianoroll.shape[0] // resolution
        ax.set_xticks(resolution * np.arange(n_beats) - 0.5)
        ax.set_xticklabels("")
        ax.set_xticks(resolution * (np.arange(n_beats) + 0.5) - 0.5,
                      minor=True)
        ax.set_xticklabels(np.arange(1, n_beats + 1), minor=True)
        ax.tick_params(axis="x", which="minor", width=0)

    # Format y-axis
    if ytick == "octave":
        ax.set_yticks(np.arange(0, 128, 12))
        if yticklabel == "name":
            ax.set_yticklabels(["C{}".format(i - 2) for i in range(11)])
    elif ytick == "pitch":
        ax.set_yticks(np.arange(0, 128))
        if yticklabel == "name":
            if is_drum:
                ax.set_yticklabels(
                    [note_number_to_drum_name(i) for i in range(128)])
            else:
                ax.set_yticklabels(
                    [note_number_to_name(i) for i in range(128)])
    elif ytick != "off":
        raise ValueError(
            f"`ytick` must be one of 'octave', 'pitch' or 'off', not {ytick}.")

    # Format axis labels
    if label not in ("x", "y", "both", "off"):
        raise ValueError(
            f"`label` must be one of 'x', 'y', 'both' or 'off', not {label}.")

    if label in ("x", "both"):
        if xtick == "step" or not xticklabel:
            ax.set_xlabel("time (step)")
        else:
            ax.set_xlabel("time (beat)")

    if label in ("y", "both"):
        if is_drum:
            ax.set_ylabel("key name")
        else:
            ax.set_ylabel("pitch")

    # Plot the grid
    if grid_axis not in ("x", "y", "both", "off"):
        raise ValueError(
            "`grid` must be one of 'x', 'y', 'both' or 'off', not "
            f"{grid_axis}.")
    if grid_axis != "off":
        ax.grid(
            axis=grid_axis,
            color="k",
            linestyle=grid_linestyle,
            linewidth=grid_linewidth,
        )

    # Plot downbeat boundaries
    if downbeats is not None:
        for downbeat in downbeats:
            ax.axvline(x=downbeat, color="k", linewidth=1)

    return img
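A minimal usage sketch with a random boolean piano roll (hypothetical data), relying only on the defaults of the helper above:

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(0)
pianoroll = rng.random((96, 128)) > 0.97  # 96 time steps x 128 pitches, boolean

fig, ax = plt.subplots(figsize=(8, 4))
plot_pianoroll(ax, pianoroll, downbeats=[0, 48])
plt.show()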
Code example #30
    def render_sample_data(self,
                           token: str,
                           sensor_modality: str = 'lidar',
                           with_anns: bool = True,
                           axes_limit: float = 30,
                           ax: Axes = None,
                           view_3d: np.ndarray = np.eye(4),
                           color_func: Any = None,
                           augment_previous: bool = False,
                           box_linewidth: int = 2,
                           filter_classes: List[str] = None,
                           max_dist: float = None,
                           out_path: str = None,
                           render_2d: bool = False) -> None:
        """
        Render sample data onto axis. Visualizes lidar in nuScenes lidar frame and camera in camera frame.
        :param token: KITTI token.
        :param sensor_modality: The modality to visualize, e.g. lidar or camera.
        :param with_anns: Whether to draw annotations.
        :param axes_limit: Axes limit for lidar data (measured in meters).
        :param ax: Axes onto which to render.
        :param view_3d: 4x4 view matrix for 3d views.
        :param color_func: Optional function that defines the render color given the class name.
        :param augment_previous: Whether to augment an existing plot (does not redraw pointcloud/image).
        :param box_linewidth: Width of the box lines.
        :param filter_classes: Optionally filter the classes to render.
        :param max_dist: Maximum distance in m to still draw a box.
        :param out_path: Optional path to save the rendered figure to disk.
        :param render_2d: Whether to render 2d boxes (only works for camera data).
        """
        # Default settings.
        if color_func is None:
            color_func = NuScenesExplorer.get_color

        boxes = self.get_boxes(token, filter_classes=filter_classes, max_dist=max_dist)  # In nuScenes lidar frame.

        if sensor_modality == 'lidar':
            # Load pointcloud.
            pc = self.get_pointcloud(token, self.root)  # In KITTI lidar frame.
            pc.rotate(self.kitti_to_nu_lidar.rotation_matrix)  # In nuScenes lidar frame.
            # Alternative options:
            # depth = pc.points[1, :]
            # height = pc.points[2, :]
            intensity = pc.points[3, :]

            # Project points to view.
            points = view_points(pc.points[:3, :], view_3d, normalize=False)
            coloring = intensity

            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 9))

            if not augment_previous:
                ax.scatter(points[0, :], points[1, :], c=coloring, s=1)
                ax.set_xlim(-axes_limit, axes_limit)
                ax.set_ylim(-axes_limit, axes_limit)

            if with_anns:
                for box in boxes:
                    color = np.array(color_func(box.name)) / 255
                    box.render(ax, view=view_3d, colors=(color, color, 'k'), linewidth=box_linewidth)

        elif sensor_modality == 'camera':
            im_path = KittiDB.get_filepath(token, 'image_2', root=self.root)
            im = Image.open(im_path)

            if ax is None:
                _, ax = plt.subplots(1, 1, figsize=(9, 16))

            if not augment_previous:
                ax.imshow(im)
                ax.set_xlim(0, im.size[0])
                ax.set_ylim(im.size[1], 0)

            if with_anns:
                if render_2d:
                    # Use KITTI's 2d boxes.
                    boxes_2d, names = self.get_boxes_2d(token, filter_classes=filter_classes)
                    for box, name in zip(boxes_2d, names):
                        color = np.array(color_func(name)) / 255
                        ax.plot([box[0], box[0]], [box[1], box[3]], color=color, linewidth=box_linewidth)
                        ax.plot([box[2], box[2]], [box[1], box[3]], color=color, linewidth=box_linewidth)
                        ax.plot([box[0], box[2]], [box[1], box[1]], color=color, linewidth=box_linewidth)
                        ax.plot([box[0], box[2]], [box[3], box[3]], color=color, linewidth=box_linewidth)
                else:
                    # Project 3d boxes to 2d.
                    transforms = self.get_transforms(token, self.root)
                    for box in boxes:
                        # Undo the transformations in get_boxes() to get back to the camera frame.
                        box.rotate(self.kitti_to_nu_lidar_inv)  # In KITTI lidar frame.
                        box.rotate(Quaternion(matrix=transforms['velo_to_cam']['R']))
                        box.translate(transforms['velo_to_cam']['T'])  # In KITTI camera frame, un-rectified.
                        box.rotate(Quaternion(matrix=transforms['r0_rect']))  # In KITTI camera frame, rectified.

                        # Filter boxes outside the image (relevant when visualizing nuScenes data in KITTI format).
                        if not box_in_image(box, transforms['p_left'][:3, :3], im.size, vis_level=BoxVisibility.ANY):
                            continue

                        # Render.
                        color = np.array(color_func(box.name)) / 255
                        box.render(ax, view=transforms['p_left'][:3, :3], normalize=True, colors=(color, color, 'k'),
                                   linewidth=box_linewidth)
        else:
            raise ValueError("Unrecognized modality {}.".format(sensor_modality))

        ax.axis('off')
        ax.set_title(token)
        ax.set_aspect('equal')

        # Render to disk.
        plt.tight_layout()
        if out_path is not None:
            plt.savefig(out_path)
Code example #31
def render_image(ax: Axes, data):
    ax.imshow(data[0])
    ax.axis('off')
    ax.set_title(data[1], fontsize='x-small')
    ax.figure.tight_layout()
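The data argument is evidently an (image, title) pair; a quick hedged sketch with random pixels:

import numpy as np
import matplotlib.pyplot as plt

img = np.random.rand(64, 64)

fig, ax = plt.subplots()
render_image(ax, (img, "random noise"))
plt.show()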