Example #1
    def test_dynamic_keywords_and_kwargs(self):
        def fn(A='default', x=1, y=2, **kws):
            return Scatter([(x, y)], label=A)

        xy = streams.PointerXY(x=1, y=2)
        dmap = DynamicMap(fn, kdims=['A'], streams=[xy])
        self.assertEqual(dmap['Test'], Scatter([(1, 2)], label='Test'))
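As a hedged usage sketch building on the objects above, stream values can also be pushed programmatically; `DynamicMap.event` updates matching stream parameters and re-runs the callback:

# Hypothetical follow-up: push new pointer values through the stream.
dmap.event(x=3, y=4)
# Indexing with a kdim value now combines it with the updated stream values;
# this should yield Scatter([(3, 4)], label='Test').
dmap['Test']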
Example #2
    def test_dynamic_mixed_kwargs(self):
        def fn(x, A, y):
            return Scatter([(x, y)], label=A)

        xy = streams.PointerXY(x=1, y=2)
        dmap = DynamicMap(fn, kdims=['A'], streams=[xy])
        self.assertEqual(dmap['Test'], Scatter([(1, 2)], label='Test'))
Example #3
    def test_dynamic_streams_only_keywords(self):
        def fn(**kwargs):
            return Scatter([(kwargs['x'], kwargs['y'])], label='default')

        xy = streams.PointerXY(x=1, y=2)
        dmap = DynamicMap(fn, kdims=[], streams=[xy])
        self.assertEqual(dmap[:], Scatter([(1, 2)], label='default'))
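A minimal sketch, assuming the same `xy` stream: streams expose an `event` method of their own, so values can be updated at the source and read back by evaluating the map with an empty key:

# Hypothetical follow-up to the test above.
xy.event(x=5, y=6)   # update the stream parameters and notify subscribers
dmap[()]             # re-evaluates fn with x=5, y=6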
Example #4
    def test_dynamic_split_args_and_kwargs(self):
        # Corresponds to the old style of kdims as posargs and streams
        # as kwargs, captured as *args and **kwargs
        def fn(*args, **kwargs):
            return Scatter([(kwargs['x'], kwargs['y'])], label=args[0])

        xy = streams.PointerXY(x=1, y=2)
        dmap = DynamicMap(fn, kdims=['A'], streams=[xy])
        self.assertEqual(dmap['Test'], Scatter([(1, 2)], label='Test'))
Example #5
    def test_dynamic_split_mismatched_kdims(self):
        # Corresponds to the old style of kdims as posargs and streams
        # as kwargs. Positional arg names don't have to match
        def fn(B, x=1, y=2):
            return Scatter([(x, y)], label=B)

        xy = streams.PointerXY(x=1, y=2)
        dmap = DynamicMap(fn, kdims=['A'], streams=[xy])
        self.assertEqual(dmap['Test'], Scatter([(1, 2)], label='Test'))
Example #6
    def test_dynamic_split_kdims_and_streams(self):
        # Corresponds to the old style of kdims as posargs and streams
        # as kwargs
        def fn(A, x=1, y=2):
            return Scatter([(x, y)], label=A)

        xy = streams.PointerXY(x=1, y=2)
        dmap = DynamicMap(fn, kdims=['A'], streams=[xy])
        self.assertEqual(dmap['Test'], Scatter([(1, 2)], label='Test'))
Example #7
    def test_dynamic_split_kdims_and_streams_invalid(self):
        # Corresponds to the old style of kdims as posargs and streams
        # as kwargs. Positional arg names don't have to match
        def fn(x=1, y=2, B='default'):
            return Scatter([(x, y)], label=B)

        xy = streams.PointerXY(x=1, y=2)
        regexp = "Callback 'fn' signature over (.+?) does not accommodate required kdims"
        with self.assertRaisesRegex(KeyError, regexp):
            DynamicMap(fn, kdims=['A'], streams=[xy])
Example #8
    def test_dynamic_split_mismatched_kdims_invalid(self):
        # Corresponds to the old style of kdims as posargs and streams
        # as kwargs. Positional arg names don't have to match and the
        # stream parameters can be passed by position but *only* if they
        # come first
        def fn(x, y, B):
            return Scatter([(x, y)], label=B)

        xy = streams.PointerXY(x=1, y=2)
        regexp = ("Unmatched positional kdim arguments only allowed "
                  "at the start of the signature")
        with self.assertRaisesRegex(KeyError, regexp):
            DynamicMap(fn, kdims=['A'], streams=[xy])
Example #9
    def plot_interactive_image(grid):
        img = hv.Image(grid)
        # Declare pointer stream initializing at (0, 0) and linking to Image
        pointer = streams.PointerXY(x=0, y=0, source=img)

        # Define function to draw cross-hair and report value of image at location as text

        def cross_hair_info(x, y):
            text = hv.Text(x + 0.05,
                           y,
                           "%.3f %.3f %.3f" % (x, y, img[x, y]),
                           halign="left",
                           valign="bottom")
            return hv.HLine(y) * hv.VLine(x) * text

        # Overlay image and cross_hair_info
        return img * hv.DynamicMap(cross_hair_info, streams=[pointer])
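A hedged usage sketch for the helper above (the random grid is invented for illustration, and we assume the function is available at module level):

# Hypothetical driver code.
import numpy as np
import holoviews as hv
hv.extension('bokeh')

grid = np.random.rand(50, 50)          # any 2D array works as an hv.Image grid
overlay = plot_interactive_image(grid)
overlay  # in a notebook: moving the pointer updates the cross-hair and readout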
Example #10
def LoadAndCrop(video_dict, stretch={'width': 1, 'height': 1}, cropmethod='none'):
    
    #if batch processing, set file to first file to be processed
    if 'file' not in video_dict:
        video_dict['file'] = video_dict['FileNames'][0]   
        print(video_dict['file'])
    
    #Upload file and check that it exists
    video_dict['fpath'] = os.path.join(os.path.normpath(video_dict['dpath']), video_dict['file'])
    if os.path.isfile(video_dict['fpath']):
        print('file: {file}'.format(file=video_dict['fpath']))
        cap = cv2.VideoCapture(video_dict['fpath'])
    else:
        raise FileNotFoundError('{file} not found. Check that directory and file names are correct'.format(
            file=video_dict['fpath']))

    #Get maximum frame of file. Note that max frame is updated later if fewer frames detected
    cap_max = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) #total frame count
    print('total frames: {frames}'.format(frames=cap_max))

    #Set first frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, video_dict['start']) #specify next frame to grab
    ret, frame = cap.read()
    if not ret:
        raise RuntimeError('Could not read frame {f}'.format(f=video_dict['start']))
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cap.release()
    print('dimensions: {x}'.format(x=frame.shape))

    #Make first image reference frame on which cropping can be performed
    image = hv.Image((np.arange(frame.shape[1]), np.arange(frame.shape[0]), frame))
    image.opts(width=int(frame.shape[1]*stretch['width']),
               height=int(frame.shape[0]*stretch['height']),
               invert_yaxis=True, cmap='gray',
               colorbar=True,
               toolbar='below',
               title="First Frame.  Crop if Desired")
    
    #Create polygon element on which to draw and connect via stream to poly drawing tool
    if cropmethod == 'none':
        image.opts(title="First Frame")
        return image, None, video_dict
    
    if cropmethod == 'Box':
        box = hv.Polygons([])
        box.opts(alpha=0.5)
        box_stream = streams.BoxEdit(source=box, num_objects=1)
        return (image*box), box_stream, video_dict
    
    if cropmethod == 'HLine':
        points = hv.Points([])
        points.opts(active_tools=['point_draw'], color='white', size=1)
        pointerXY_stream = streams.PointerXY(x=0, y=0, source=image)
        pointDraw_stream = streams.PointDraw(source=points, num_objects=1)
            
        def h_track(x, y): #function to track pointer
            y = int(np.around(y))
            text = hv.Text(x, y, str(y), halign='left', valign='bottom')
            return hv.HLine(y) * text
        track = hv.DynamicMap(h_track, streams=[pointerXY_stream])
        
        def h_line(data): #function to draw line
            try:
                return hv.HLine(data['y'][0])
            except (TypeError, KeyError, IndexError):
                return hv.HLine(0)
        line = hv.DynamicMap(h_line, streams=[pointDraw_stream])
        
        def h_text(data): #function to write ycrop value
            center = frame.shape[1]//2
            try:
                y = int(np.around(data['y'][0]))
                return hv.Text(center, y+10, 'ycrop: {x}'.format(x=y))
            except (TypeError, KeyError, IndexError):
                return hv.Text(center, 10, 'ycrop: 0')
        text = hv.DynamicMap(h_text, streams=[pointDraw_stream])
        
        return image*track*points*line*text, pointDraw_stream, video_dict
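For reference, a sketch of the `video_dict` this function expects, with the keys inferred from the code above (the paths are invented):

# Hypothetical call for single-file mode.
video_dict = {
    'dpath': '/path/to/videos',  # directory containing the video
    'file': 'trial1.avi',        # omit and supply 'FileNames' for batch mode
    'start': 0,                  # first frame to display
}
image, crop_stream, video_dict = LoadAndCrop(video_dict, cropmethod='Box')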
Example #11
# ## 2D plots with interactive slicing

ls = np.linspace(0, 10, 200)
xx, yy = np.meshgrid(ls, ls)
bounds = (0, 0, 10, 10)  # Coordinate system: (left, bottom, right, top)

energy = hv.Dimension('energy', label='E', unit='MeV')
distance = hv.Dimension('distance', label='d', unit='m')
charge = hv.Dimension('charge', label='Q', unit='pC')

image = hv.Image(np.sin(xx) * np.cos(yy),
                 bounds=bounds,
                 kdims=[energy, distance],
                 vdims=charge)
pointer = streams.PointerXY(x=5, y=5, source=image)

dmap = hv.DynamicMap(lambda x, y: hv.VLine(x) * hv.HLine(y), streams=[pointer])
x_sample = hv.DynamicMap(
    lambda x, y: image.sample(energy=x).opts(color='darkred'),
    streams=[pointer])
y_sample = hv.DynamicMap(
    lambda x, y: image.sample(distance=y).opts(color='lightsalmon'),
    streams=[pointer])

pointer_dmap = hv.DynamicMap(lambda x, y: hv.Points([(x, y)]),
                             streams=[pointer])
pointer_x_sample = hv.DynamicMap(lambda x, y: hv.Points([(y, image[x, y])]),
                                 streams=[pointer])
pointer_y_sample = hv.DynamicMap(lambda x, y: hv.Points([(x, image[x, y])]),
                                 streams=[pointer])
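One possible way to compose these pieces for display; the layout below is our sketch, not prescribed by the snippet:

# Hypothetical composition: cross-hair over the image, sample curves alongside.
layout = (image * dmap * pointer_dmap
          + x_sample * pointer_x_sample
          + y_sample * pointer_y_sample).cols(2)
layout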
Example #12
def LoadAndCrop(dpath, file, stretch_w=1, stretch_h=1, cropmethod='none'):

    #Upload file and check that it exists
    fpath = os.path.join(dpath, file)
    if os.path.isfile(fpath):
        print('file: ' + fpath)
        cap = cv2.VideoCapture(fpath)
    else:
        raise FileNotFoundError(
            'File not found. Check that directory and file names are correct.')

    #Get maximum frame of file. Note that max frame is updated later if fewer frames detected
    cap_max = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))  #total frame count
    print('total frames: ' + str(cap_max))

    #Retrieve first frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  #specify next frame to grab
    ret, frame = cap.read()
    if not ret:
        raise RuntimeError('Could not read first frame')
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cap.release()

    #Make first image reference frame on which cropping can be performed
    image = hv.Image(
        (np.arange(gray.shape[1]), np.arange(gray.shape[0]), gray))
    image.opts(width=int(gray.shape[1] * stretch_w),
               height=int(gray.shape[0] * stretch_h),
               invert_yaxis=True,
               cmap='gray',
               colorbar=True,
               toolbar='below',
               title="First Frame.  Crop if Desired")

    #Create polygon element on which to draw and connect via stream to poly drawing tool
    if cropmethod == 'none':
        image.opts(title="First Frame")
        return image, None, fpath

    if cropmethod == 'Box':
        box = hv.Polygons([])
        box.opts(alpha=.5)
        box_stream = streams.BoxEdit(source=box, num_objects=1)
        return (image * box), box_stream, fpath

    if cropmethod == 'HLine':
        points = hv.Points([])
        points.opts(active_tools=['point_draw'], color='white', size=1)
        pointerXY_stream = streams.PointerXY(x=0, y=0, source=image)
        pointDraw_stream = streams.PointDraw(source=points, num_objects=1)

        def h_track(x, y):  #function to track pointer
            y = int(np.around(y))
            text = hv.Text(x, y, str(y), halign='left', valign='bottom')
            return hv.HLine(y) * text

        track = hv.DynamicMap(h_track, streams=[pointerXY_stream])

        def h_line(data):  #function to draw line
            try:
                return hv.HLine(data['y'][0])
            except (TypeError, KeyError, IndexError):
                return hv.HLine(0)

        line = hv.DynamicMap(h_line, streams=[pointDraw_stream])

        def h_text(data):  #function to write ycrop value
            center = gray.shape[1] // 2
            try:
                y = int(np.around(data['y'][0]))
                return hv.Text(center, y + 10, 'ycrop: {x}'.format(x=y))
            except (TypeError, KeyError, IndexError):
                return hv.Text(center, 10, 'ycrop: 0')

        text = hv.DynamicMap(h_text, streams=[pointDraw_stream])

        return image * track * points * line * text, pointDraw_stream, fpath
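A hedged call sketch for this variant (directory and file names invented):

image, crop_stream, fpath = LoadAndCrop('/path/to/videos', 'trial1.avi',
                                        cropmethod='HLine')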
Example #13
def explore_latent_space(
    data: VisionDataset,
    encode_fn: Callable[[Tensor], Tensor],
    decode_fn: Callable[[Tensor], Tensor],
    data_to_encode: str = "img",
    encodings_label: Optional[str] = None,
    device: str = "cuda",
    batch_size: int = 256,
) -> Panel:
    """Generates panel to interactively visualize an autoencoder's latent space distribution and individual samples.

    Args:
        data: Dataset that returns pairs of images and targets, where the target can either be a classification label or
            a segmentation map.
        encode_fn: Function that uses an encoder neural net to predict a latent vector associated with the high
            dimensional input data.
        decode_fn: Function that uses a decoder neural net to decode a latent vector back to a high dimensional data
            sample.
        data_to_encode: One of "img" or "target", indicating which data provided by the dataset to feed to the encoding
            function.
        encodings_label: Either "target" or `None`, indicating whether to use the data items' targets (in classification
            tasks) as labels by which to color the latent vectors in the plot.
        device: Device on which to perform the neural nets' computations.
        batch_size: Size of the batch to use when initially encoding the dataset's items in the latent space.

    Returns:
        Interactive panel to interactively visualize the autoencoder's latent space distribution and individual samples.
    """
    data_to_encode_values = ["img", "target"]
    if data_to_encode not in data_to_encode_values:
        raise ValueError(
            f"Invalid value for `data_to_encode` flag. You passed: '{data_to_encode}', "
            f"but it should be one of {data_to_encode_values}.")

    if encodings_label == "target" and data_to_encode == "target":
        raise ValueError(
            "You requested conflicting options: encode the dataset's targets / label encodings w.r.t targets. "
            "Either switch to encode the images instead (`data_to_encode='img'`), "
            "or give up labelling the encodings (`encodings_label=None`).")

    # Encode the dataset
    print("Encoding the dataset items in the latent space...")
    encodings, targets = [], []
    for img, target in DataLoader(data, batch_size=batch_size):
        img = img.to(device)
        target = target.to(device)

        # Avoid shadowing the `data` dataset argument inside the loop
        to_encode = img if data_to_encode == "img" else target
        z = encode_fn(to_encode)

        encodings.extend(z.cpu().detach().numpy())

        if encodings_label == "target":
            targets.extend(target.cpu().detach().numpy())

    encodings = np.array(encodings)
    targets = np.array(targets)[:, None]

    # If the latent space is not already 2D, use the UMAP dimensionality reduction algorithm
    # to learn a projection between the latent space and a 2D space ready to be displayed
    latent_space_ndim = encodings.shape[-1]
    high_dim_latent_space = latent_space_ndim > 2
    if high_dim_latent_space:
        print("Learning UMAP embedding for latent space vectors...")
        reducer = umap.UMAP()
        encodings = reducer.fit_transform(encodings)

    if encodings_label == "target":
        encoded_points = hv.Points(np.hstack((encodings, targets)),
                                   vdims=["target"]).opts(color="target",
                                                          cmap="Category10",
                                                          colorbar=True)
    else:
        encoded_points = hv.Points(encodings)

    # Track the user's pointer in the scatter plot
    pointer = streams.PointerXY(x=0.0, y=0.0, source=encoded_points)

    # Setup callbacks to automatically decode selected points
    def decode_point(x, y) -> hv.Image:
        latent_space_point = np.array([x, y])[None]
        if high_dim_latent_space:
            # Project the 2D sample back into the higher-dimensionality latent space
            # using UMAP's learned inverse transform
            latent_space_point = reducer.inverse_transform(latent_space_point)
        point_tensor = torch.tensor(latent_space_point,
                                    dtype=torch.float,
                                    device=device)
        decoded_img = decode_fn(point_tensor).squeeze(dim=0)
        return hv.Image(decoded_img.cpu().detach().numpy())

    decoded_point = hv.DynamicMap(decode_point, streams=[pointer]).opts(
        opts.Image(axiswise=True))

    # Common options for the main panels to display
    encodings_title = (
        "Latent space" if not high_dim_latent_space else
        f"2D UMAP embedding of the {latent_space_ndim}D latent space")
    return encoded_points.opts(
        width=600, height=600, title=encodings_title) + decoded_point.opts(
            xaxis=None, yaxis=None, cmap="gray", title="Decoded sample")
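A toy usage sketch; `dataset`, `model.encoder` and `model.decoder` are placeholders for your own torchvision dataset and trained autoencoder:

# Hypothetical wiring for the panel above.
panel = explore_latent_space(
    dataset,
    encode_fn=lambda x: model.encoder(x),
    decode_fn=lambda z: model.decoder(z),
    encodings_label="target",   # colors points by class label
    device="cuda",
)
panel  # hovering over the scatter decodes the pointed-at latent vector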
Example #14
class FreehandEditor(param.Parameterized):
    '''Adds a freehand drawing tool that embeds the drawn path in the image/stack'''

    dataset = param.Parameter(EditableHvDataset(), precedence=-1)
    freehand = param.Parameter(streams.FreehandDraw(num_objects=1),
                               precedence=-1,
                               instantiate=True)
    pointer_pos = param.Parameter(streams.PointerXY(),
                                  precedence=-1,
                                  instantiate=True)
    clicked_pos = param.Parameter(streams.SingleTap(transient=True),
                                  precedence=-1,
                                  instantiate=True)
    pipe = param.Parameter(streams.Pipe(data=[]),
                           instantiate=True,
                           precedence=-1)
    path_plot = param.Parameter(hv.Path([]), precedence=-1)

    cmap = param.Parameter(glasbey_hv_16bit, precedence=-1)
    tool_width = param.Integer(20, bounds=(1, 300))

    zoom_range = param.Parameter(
        streams.RangeX(),
        doc='''range stream used to adjust glyph size based on zoom level, assumes data_aspect=1''',
        precedence=-1,
        instantiate=True)
    plot_size = param.Parameter(streams.PlotSize(), precedence=-1)
    zoom_level = param.Number(1.0, precedence=-1)
    zoom_initialized = param.Boolean(False, precedence=-1)

    swap_axes = param.Boolean(False)
    draw_in_3D = param.Boolean(False)

    slicer = param.Parameter(None)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # grey glyph for drawing label -1 (unlabeled)
        self.cmap[-1] = '#999999'

        self.path_plot = hv.DynamicMap(self.plot_path, streams=[self.pipe])
        self.freehand.source = self.path_plot

        self.path_plot.opts(opts.Path(active_tools=['freehand_draw']))
        self.pointer_pos.source = self.path_plot
        self.clicked_pos.source = self.path_plot
        self.zoom_range.source = self.path_plot
        self.plot_size.source = self.path_plot

        self.path_plot = self.path_plot * hv.DynamicMap(self.plot_pointer)

    @param.depends('zoom_range.x_range', 'plot_size.width', watch=True)
    def monitor_zoom_level(self):
        plot_width = self.plot_size.width
        if plot_width:
            zrange = self.zoom_range.x_range

            if zrange is None or np.isnan(zrange[0]) or np.isnan(zrange[1]):
                self.zoom_level = plot_width / self.dataset.img.shape[1]

            else:
                zoomed_width = zrange[1] - zrange[0]
                self.zoom_level = min(
                    self.dataset.spacing) * plot_width / zoomed_width

    @param.depends('dataset.drawing_label', 'tool_width', 'zoom_level')
    def plot_path(self, data):
        # at this stage, plot is rendered and size known
        if not self.zoom_initialized:
            self.monitor_zoom_level()
            self.zoom_initialized = True

        path = hv.Path(data)
        path.opts(
            opts.Path(line_width=self.tool_width * self.zoom_level + 1,
                      color=self.cmap[self.dataset.drawing_label],
                      line_cap='round',
                      line_join='round'))

        return path

    @param.depends('dataset.drawing_label', 'tool_width', 'zoom_level',
                   'pointer_pos.x', 'pointer_pos.y')
    def plot_pointer(self):

        if not self.zoom_initialized:
            self.monitor_zoom_level()
            self.zoom_initialized = True

        pos_x = self.pointer_pos.x
        if pos_x is None:
            pos_x = 0.

        pos_y = self.pointer_pos.y
        if pos_y is None:
            pos_y = 0.

        pt = hv.Points((pos_x, pos_y))
        pt.opts(
            opts.Points(
                size=self.tool_width * self.zoom_level,
                color=self.cmap[self.dataset.drawing_label],
                shared_axes=True,
            ))

        return pt

    @staticmethod
    def _get_axis_id(axis_name):
        return {'z': 0, 'y': 1, 'x': 2}[axis_name]

    @staticmethod
    def _get_plane_axes_id(axis_name):
        return {'z': [1, 2], 'y': [0, 2], 'x': [0, 1]}[axis_name]

    def _phy_to_px(self, coords, return_spacing=False):
        '''Converts physical coords to pixel coordinates'''

        if self.swap_axes:
            coords = coords[:, ::-1]

        spacing = np.asarray(self.dataset.spacing)
        if self.dataset.img.ndim > 2:
            plane_axes = self._get_plane_axes_id(self.slicer.axis)
            spacing = spacing[plane_axes]

        coords = coords / spacing[::-1][None]

        px_coords = np.rint(coords).astype(np.int32)
        if return_spacing:
            return px_coords, spacing
        else:
            return px_coords

    @param.depends('freehand.data', watch=True)
    def embedd_path(self):
        '''write the polygon path on rasterized array with correct label and width'''

        coords = self.freehand.data
        if coords and coords['ys']:
            pts = np.stack(
                [np.asarray(coords['xs'][0]),
                 np.asarray(coords['ys'][0])],
                axis=1)

            pts, spacing = self._phy_to_px(pts, return_spacing=True)
            mask = np.zeros_like(self.dataset.img, np.uint8)
            if mask.ndim > 2:
                axis = self._get_axis_id(self.slicer.axis)
                loc = [slice(None) for _ in range(mask.ndim)]
                loc[axis] = int(self.slicer.slice_id /
                                self.dataset.spacing[axis])
                loc = tuple(loc)  # NumPy requires a tuple, not a list, here
                submask = np.zeros(mask[loc].shape, dtype=np.uint8)

                # handle anisotropic slice --> draw 1 px line and expand with distance transform
                cv.polylines(submask, [pts], False, 1, 1, cv.LINE_8)

                min_spacing = min(spacing)
                if self.draw_in_3D:
                    mask[loc] = submask
                    dist = distance_transform_edt(
                        ~mask.astype(bool),
                        sampling=self.dataset.spacing / min_spacing)
                    mask = dist <= self.tool_width / 2

                else:
                    dist = distance_transform_edt(
                        ~submask.astype(bool),
                        sampling=(spacing[0] / min_spacing,
                                  spacing[1] / min_spacing))
                    mask[loc] = dist <= self.tool_width / 2

            else:
                # draw polyline on minimal size crop
                margin = self.tool_width // 2 + 1
                loc = [
                    slice(max(0, pts[:, ax].min() - margin),
                          pts[:, ax].max() + margin) for ax in range(2)
                ]
                offset = np.array([s.start for s in loc])[None]
                loc = tuple(loc[::-1])  # row-major order; NumPy requires a tuple
                submask = mask[loc]
                cv.polylines(submask, [pts - offset], False, 1,
                             self.tool_width, cv.LINE_8)

            mask = mask.astype(bool)

            self.dataset.write_label(mask)
            self._clear()

    @param.depends('clicked_pos.x', 'clicked_pos.y', watch=True)
    def monitor_clicked_label(self):
        if self.clicked_pos.x is not None and self.clicked_pos.y is not None:
            coords = self._phy_to_px(
                np.asarray([[self.clicked_pos.x, self.clicked_pos.y]]))
            coords = coords[0, ::-1]

            if self.dataset.img.ndim > 2:
                axis = self._get_axis_id(self.slicer.axis)
                slice_id = int(self.slicer.slice_id /
                               self.dataset.spacing[axis])
                coords = np.insert(coords, axis, slice_id)

            self.dataset.click_callback(coords)

    def _clear(self):
        self.pipe.send([])

    def widgets(self):
        wg = self.dataset.widgets()
        wg.append(self.param.tool_width)
        return wg
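A rough instantiation sketch, assuming `panel as pn` is imported and that `EditableHvDataset` can be built from your image (its constructor is not shown here, so the `img` argument is a guess):

# Hypothetical usage.
editor = FreehandEditor(dataset=EditableHvDataset(img=my_image))
pn.Row(editor.path_plot, pn.Column(*editor.widgets()))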
Example #15
    def setup_streams(self):
        self.range_xy = streams.RangeXY()
        self.pipe = streams.Pipe(data=[])
        self.pointer = streams.PointerXY()
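A minimal sketch (our illustration, not part of the original class) of how such streams are typically attached to a DynamicMap afterwards; `self.render` is a hypothetical callback:

    # Hypothetical companion method: wire the streams to a DynamicMap so that
    # range changes, piped data and pointer moves all trigger self.render.
    def view(self):
        return hv.DynamicMap(self.render,
                             streams=[self.range_xy, self.pipe, self.pointer])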