Example #1
    def __post_init__(self):
        """
        :return:
        """
        data = self.spectral_cube.data

        self.ds = hv.Dataset((np.arange(data.shape[2]), np.arange(
            data.shape[1]), np.arange(data.shape[0]), data),
                             [self.spectral_axis_name, 'x', 'y'], 'Cube')
        # maybe PolyEdit as well
        # polys = hv.Polygons([hv.Box(int(self.image_width / 2), int(self.image_height / 2), int(self.image_height / 2))])
        # self.box_stream = streams.PolyEdit(source=polys)
        polys = hv.Polygons([])
        self.box_stream = streams.BoxEdit(source=polys)

        hlines = hv.HoloMap({i: hv.VLine(i)
                             for i in range(data.shape[2])}, 'wavelengths')
        dmap = hv.DynamicMap(self.roi_curves, streams=[self.box_stream])

        im = self.ds.to(hv.Image, ['x', 'y'], dynamic=True)
        self.layout = (im * polys + dmap * hlines).opts(
            opts.Image(cmap=self.color_map,
                       width=self.image_width,
                       height=self.image_height),
            opts.Curve(width=650, height=450, framewise=True),
            opts.Polygons(fill_alpha=0.2, line_color='white'),
            opts.VLine(color='black'))
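The DynamicMap above refers to a `roi_curves` method that this snippet does not include. A minimal sketch of such a callback, assuming the Dataset and dimension names built in `__post_init__` and mirroring the pattern of Example #4 below:

    def roi_curves(self, data):
        # sketch only -- the original implementation is not shown in the snippet
        if not data or not any(len(d) for d in data.values()):
            return hv.NdOverlay({0: hv.Curve([], self.spectral_axis_name, 'Cube')})
        curves = {}
        boxes = zip(data['x0'], data['x1'], data['y0'], data['y1'])
        for i, (x0, x1, y0, y1) in enumerate(boxes):
            # average the cube over the drawn box to get one spectrum per ROI
            selection = self.ds.select(x=(x0, x1), y=(y0, y1))
            curves[i] = hv.Curve(selection.aggregate(self.spectral_axis_name, np.mean))
        return hv.NdOverlay(curves)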
Example #2
class RoiEditor(param.Parameterized):
    '''mixin class to add bounding box editor capabilities'''

    roi_plot = param.Parameter(hv.Rectangles([], group='ROIedit'))
    box_edit = param.Parameter(streams.BoxEdit(), instantiate=True)
    spacing = param.NumericTuple((1, 1), doc='2D pixel size')

    def __init__(self, *args, **kwargs):
        num_objects = kwargs.pop('num_objects', 1)
        super().__init__(*args, **kwargs)

        self.box_edit.num_objects = num_objects
        self.box_edit.source = self.roi_plot

    def img_slice(self):
        '''return image slice in px coordinates'''
        if self.box_edit.data is None or not self.box_edit.data['x0']:
            return None

        # repack the stream's dict of lists into a list of (x0, x1, y0, y1) tuples;
        # index the keys explicitly rather than relying on dict ordering
        data = self.box_edit.data
        rois = list(zip(data['x0'], data['x1'], data['y0'], data['y1']))

        loc = [(slice(max(0, round(y1 / self.spacing[0])),
                      max(0, round(y0 / self.spacing[0]))),
                slice(max(0, round(x0 / self.spacing[1])),
                      max(0, round(x1 / self.spacing[1]))))
               for x0, x1, y0, y1 in rois]
        return loc
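A hedged usage sketch: `RoiEditor` is written as a mixin, but as a `param.Parameterized` class it can also be instantiated directly. The array `img`, the bounds, and the 0.5-unit pixel spacing below are illustrative values, not part of the original code.

import numpy as np
import holoviews as hv
hv.extension('bokeh')

editor = RoiEditor(spacing=(0.5, 0.5), num_objects=2)   # illustrative values
img = np.random.rand(256, 256)
# overlay the editable rectangles on the image and draw boxes in the notebook
overlay = hv.Image(img, bounds=(0, 0, 128, 128)) * editor.roi_plot
# afterwards, img_slice() returns one (row_slice, col_slice) pair per box
for rows, cols in editor.img_slice() or []:
    roi = img[rows, cols]   # row order follows the plot's y-axis convention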
Example #3
def interactive_crop(
    video_path,
    frame=0,
):
    """
    Loads and displays a frame for a video to be used for cropping.

    Cropping automatically updated using holoviews stream object.

    Args:
        video_path (str): Path to the video
        frame (int): The index of the frame to be used for cropping
    Returns:
        image, stream
    """

    hv.notebook_extension("bokeh")
    cap = cv2.VideoCapture(video_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, frame)
    _, frame = cap.read()
    print(frame.shape)
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # frame = frame[::-1, :]  # inverse y axis for plotting
    cap.release()

    image = hv.Image(
        (np.arange(frame.shape[1]), np.arange(frame.shape[0]), frame))
    image.opts(
        width=frame.shape[1],
        height=frame.shape[0],
        cmap="gray",
        colorbar=True,
        toolbar="below",
        title="First Frame.  Crop if Desired",
    )
    box = hv.Polygons([])
    box.opts(alpha=0.5)
    box_stream = streams.BoxEdit(source=box, num_objects=1)
    return (image * box), box_stream
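The returned stream only carries the box corners; turning them into an actual crop is left to the caller. A hedged sketch of that step (the helper `crop_frame` and the file name are illustrative, not from the original code):

def crop_frame(frame, box_stream):
    # sketch only: apply the drawn BoxEdit selection to a grayscale frame
    data = box_stream.data
    if not data or not data.get('x0'):
        return frame                      # nothing drawn yet; keep full frame
    x0, x1 = int(data['x0'][0]), int(data['x1'][0])
    y0, y1 = int(data['y0'][0]), int(data['y1'][0])
    return frame[min(y0, y1):max(y0, y1), min(x0, x1):max(x0, x1)]

# layout, box_stream = interactive_crop("my_video.avi", frame=0)
# layout                       # display, draw a box, then crop later frames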
Example #4
vmax = 40
vmin = -vmax
f_width = 300
hvc_opts = dict(logy=True, cmap='RdBu_r', symmetric=True, colorbar=True,
                tools=['hover'], invert_yaxis=True, frame_width=f_width)
im = ds.to(hv.QuadMesh, ['x', 'y'],
           dynamic=True).redim.range(coefs=(vmin, vmax)).opts(**hvc_opts)
im2 = ds.aggregate(['x', 'y'], np.mean).to(hv.QuadMesh, ['x', 'y'],
                                           dynamic=True)
im2 = im2.redim.range(coefs=(vmin, vmax)).opts(**hvc_opts)

# ## Create region-of-interest function

# +
polys = hv.Polygons([])
box_stream = streams.BoxEdit(source=polys)


def roi_curves(data):
    if not data or not any(len(d) for d in data.values()):
        return hv.NdOverlay({0: hv.Curve([], 'ens', 'coefs')})

    curves = {}
    data = zip(data['x0'], data['x1'], data['y0'], data['y1'])
    for i, (x0, x1, y0, y1) in enumerate(data):
        selection = ds.select(x=(x0, x1),
                              y=(y1, y0))  # y0/y1 swapped because of the inverted y-axis
        curves[i] = hv.Spread(selection.aggregate('ens', np.mean, np.std))
    return hv.NdOverlay(curves)
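The snippet stops before attaching `roi_curves` to the stream; presumably the remaining glue follows the same pattern as Example #1, roughly:

# sketch of the wiring not shown above (imports of hv/opts assumed as elsewhere)
roi_dmap = hv.DynamicMap(roi_curves, streams=[box_stream])
(im * polys + roi_dmap).opts(
    hv.opts.Polygons(fill_alpha=0.2, line_color='white'),
    hv.opts.Spread(framewise=True, frame_width=f_width))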
Example #5
def LoadAndCrop(video_dict, stretch={'width': 1, 'height': 1}, cropmethod='none'):
    
    #if batch processing, set file to first file to be processed
    if 'file' not in video_dict.keys():
        video_dict['file'] = video_dict['FileNames'][0]   
        print(video_dict['file'])
    
    #Upload file and check that it exists
    video_dict['fpath'] = os.path.join(os.path.normpath(video_dict['dpath']), video_dict['file'])
    if os.path.isfile(video_dict['fpath']):
        print('file: {file}'.format(file=video_dict['fpath']))
        cap = cv2.VideoCapture(video_dict['fpath'])
    else:
        raise FileNotFoundError('{file} not found. Check that directory and file names are correct'.format(
            file=video_dict['fpath']))

    #Get maximum frame of file. Note that max frame is updated later if fewer frames detected
    cap_max = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print('total frames: {frames}'.format(frames=cap_max))

    #Set first frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, video_dict['start'])  #seek to the requested start frame
    ret, frame = cap.read() 
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)   
    cap.release()
    print('dimensions: {x}'.format(x=frame.shape))

    #Make first image reference frame on which cropping can be performed
    image = hv.Image((np.arange(frame.shape[1]), np.arange(frame.shape[0]), frame))
    image.opts(width=int(frame.shape[1]*stretch['width']),
               height=int(frame.shape[0]*stretch['height']),
               invert_yaxis=True, cmap='gray',
               colorbar=True,
               toolbar='below',
               title="First Frame.  Crop if Desired")
    
    #Create polygon element on which to draw and connect via stream to poly drawing tool
    if cropmethod=='none':
        image.opts(title="First Frame")
        return image,None,video_dict
    
    if cropmethod=='Box':         
        box = hv.Polygons([])
        box.opts(alpha=.5)
        box_stream = streams.BoxEdit(source=box,num_objects=1)     
        return (image*box),box_stream,video_dict
    
    if cropmethod=='HLine':  
        points = hv.Points([])
        points.opts(active_tools=['point_draw'], color='white',size=1)
        pointerXY_stream = streams.PointerXY(x=0, y=0, source=image)
        pointDraw_stream = streams.PointDraw(source=points,num_objects=1)
            
        def h_track(x, y): #function to track pointer
            y = int(np.around(y))
            text = hv.Text(x, y, str(y), halign='left', valign='bottom')
            return hv.HLine(y) * text
        track = hv.DynamicMap(h_track, streams=[pointerXY_stream])
        
        def h_line(data): #function to draw line
            try:
                return hv.HLine(data['y'][0])
            except (TypeError, KeyError, IndexError):
                return hv.HLine(0)
        line = hv.DynamicMap(h_line, streams=[pointDraw_stream])
        
        def h_text(data): #function to write ycrop value
            center = frame.shape[1]//2
            try:
                y = int(np.around(data['y'][0]))
                return hv.Text(center, y+10, 'ycrop: {x}'.format(x=y))
            except (TypeError, KeyError, IndexError):
                return hv.Text(center, 10, 'ycrop: 0')
        text = hv.DynamicMap(h_text, streams=[pointDraw_stream])
        
        return image*track*points*line*text,pointDraw_stream,video_dict   
Example #6
def LoadAndCrop(video_dict,
                stretch={
                    'width': 1,
                    'height': 1
                },
                cropmethod=None,
                fstfile=False):
    """ 
    -------------------------------------------------------------------------------------
    
    Loads video and creates interactive cropping tool from first frame. In the 
    case of batch processing, the first frame of the first video is used. Additionally, 
    when batch processing, the same cropping parameters will be applied to every video.
    Care should therefore be taken that the region of interest is in the same position across 
    videos.
    
    -------------------------------------------------------------------------------------
    Args:
        video_dict:: [dict]
            Dictionary with the following keys:
                'dpath' : directory containing files [str]
                'file' : filename with extension, e.g. 'myvideo.wmv' [str]
                'fps' : frames per second of video files to be processed [int]
                'start' : frame at which to start. 0-based [int]
                'end' : frame at which to end.  set to None if processing 
                        whole video [int]
                'ftype' : (only if batch processing) 
                          video file type extension (e.g. 'wmv') [str]
                'FileNames' : (only if batch processing)
                              List of filenames of videos in folder to be batch 
                              processed.  [list]
                
        stretch:: [dict]
            Dictionary with the following keys:
                'width' : proportion by which to stretch frame width [float]
                'height' : proportion by which to stretch frame height [float]
                
        cropmethod:: [str]
            Method of cropping video.  cropmethod takes the following values:
                None : No cropping 
                'Box' : Create box selection tool for cropping video
                
        fstfile:: [bool]
            Dictates whether to use first file in video_dict['FileNames'] to generate
            reference.  True/False
    
    -------------------------------------------------------------------------------------
    Returns:
        image:: [holoviews.Image]
            Holoviews hv.Image displaying first frame
            
        stream:: [holoviews.streams.stream]
            Holoviews stream object enabling dynamic selection in response to 
            cropping tool. `stream.data` contains x and y coordinates of crop
            boundary vertices.
            
        video_dict:: [dict]
            Dictionary with the following keys:
                'dpath' : directory containing files [str]
                'file' : filename with extension, e.g. 'myvideo.wmv' [str]
                'fps' : frames per second of video file/files to be processed [int]
                'start' : frame at which to start. 0-based [int]
                'end' : frame at which to end.  set to None if processing whole 
                        video [int]
                'ftype' : (only if batch processing) 
                          video file type extension (e.g. 'wmv') [str]
                'FileNames' : (only if batch processing)
                              List of filenames of videos in folder to be 
                              batch processed.  [list]
    
    -------------------------------------------------------------------------------------
    Notes:
        - in the case of batch processing, video_dict['file'] is set to first 
          video in file 
        - prior cropping method HLine has been removed
    
    """

    #if batch processing, set file to first file to be processed
    video_dict['file'] = (video_dict['FileNames'][0]
                          if fstfile else video_dict['file'])

    #Upload file and check that it exists
    video_dict['fpath'] = os.path.join(os.path.normpath(video_dict['dpath']),
                                       video_dict['file'])
    if os.path.isfile(video_dict['fpath']):
        print('file: {file}'.format(file=video_dict['fpath']))
        cap = cv2.VideoCapture(video_dict['fpath'])
    else:
        raise FileNotFoundError(
            '{file} not found. Check that directory and file names are correct'
            .format(file=video_dict['fpath']))

    #Get maximum frame of file. Note that max frame is updated later if fewer frames detected
    cap_max = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print('total frames: {frames}'.format(frames=cap_max))

    #Set first frame.
    try:
        cap.set(cv2.CAP_PROP_POS_FRAMES, video_dict['start'])
    except (KeyError, TypeError):  #missing or invalid 'start'; default to frame 0
        cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    ret, frame = cap.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cap.release()

    #Make first image reference frame on which cropping can be performed
    image = hv.Image(
        (np.arange(frame.shape[1]), np.arange(frame.shape[0]), frame))
    image.opts(width=int(frame.shape[1] * stretch['width']),
               height=int(frame.shape[0] * stretch['height']),
               invert_yaxis=True,
               cmap='gray',
               colorbar=True,
               toolbar='below',
               title="First Frame.  Crop if Desired")

    #Create polygon element on which to draw and connect via stream to poly drawing tool
    if cropmethod is None:
        image.opts(title="First Frame")
        return image, None, video_dict

    if cropmethod == 'Box':
        box = hv.Polygons([])
        box.opts(alpha=.5)
        box_stream = streams.BoxEdit(source=box, num_objects=1)
        return (image * box), box_stream, video_dict
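A typical calling pattern, per the docstring (the dictionary values below are illustrative):

# sketch: build the video_dict described in the docstring and crop with a box
video_dict = {'dpath': './videos', 'file': 'myvideo.wmv',
              'fps': 30, 'start': 0, 'end': None}
image, stream, video_dict = LoadAndCrop(video_dict, cropmethod='Box')
image   # display in a notebook cell and draw the crop box
# stream.data['x0'], ['x1'], ['y0'], ['y1'] then hold the crop-box corners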
# highlights = ["None", "Pause", "Continuous Watch"]
highlights = ["None", "Pauses", "Skips"]

# instantiate the text inputs that collect the metadata uploaded to the database with each annotation
input_name = pn.widgets.TextInput(
    name="Name of Annotation", placeholder="Enter a name for the annotation")
input_annotator_id = pn.widgets.TextInput(
    name="Annotator ID", placeholder="Enter your annotator ID (e.g. 'p1')")
input_additional_info = pn.widgets.TextInput(
    name="Additional Information",
    placeholder="Optional additional information...")

# instantiate the box edit tool
boxes = hv.Rectangles([])
box_stream = streams.BoxEdit(source=boxes,
                             num_objects=100,
                             styles={'fill_color': ['red']})
boxes.opts(opts.Rectangles(active_tools=['box_edit'], fill_alpha=0.5))
# hover tool for box edits so the index of a box is shown when the mouse hovers over it
hover = HoverTool(tooltips=[
    ("index", "$index"),
])
boxes.opts(tools=[hover])
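One plausible way to assemble the widgets and the annotation canvas into a Panel layout (a sketch; the real app layout and the raster backdrop built below are not shown here):

# sketch only: the actual application composes `boxes` with the static raster
controls = pn.Column(input_name, input_annotator_id, input_additional_info)
app = pn.Row(controls, pn.panel(boxes.opts(responsive=True, min_height=300)))
# app.servable()   # or app.show() when run outside a notebook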

#################################
# Fxns for making static raster #
#################################


def get_patient_session_info():
Example #8
def LoadAndCrop(dpath, file, stretch_w=1, stretch_h=1, cropmethod='none'):

    #Upload file and check that it exists
    fpath = dpath + "/" + file
    if os.path.isfile(fpath):
        print('file: ' + fpath)
        cap = cv2.VideoCapture(fpath)
    else:
        raise FileNotFoundError(
            'File not found. Check that directory and file names are correct.')

    #Get maximum frame of file. Note that max frame is updated later if fewer frames detected
    cap_max = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    print('total frames: ' + str(cap_max))

    #Retrieve first frame
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)  #seek to the first frame
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cap.release()

    #Make first image reference frame on which cropping can be performed
    image = hv.Image(
        (np.arange(gray.shape[1]), np.arange(gray.shape[0]), gray))
    image.opts(width=int(gray.shape[1] * stretch_w),
               height=int(gray.shape[0] * stretch_h),
               invert_yaxis=True,
               cmap='gray',
               colorbar=True,
               toolbar='below',
               title="First Frame.  Crop if Desired")

    #Create polygon element on which to draw and connect via stream to poly drawing tool
    if cropmethod == 'none':
        image.opts(title="First Frame")
        return image, None, fpath

    if cropmethod == 'Box':
        box = hv.Polygons([])
        box.opts(alpha=.5)
        box_stream = streams.BoxEdit(source=box, num_objects=1)
        return (image * box), box_stream, fpath

    if cropmethod == 'HLine':
        points = hv.Points([])
        points.opts(active_tools=['point_draw'], color='white', size=1)
        pointerXY_stream = streams.PointerXY(x=0, y=0, source=image)
        pointDraw_stream = streams.PointDraw(source=points, num_objects=1)

        def h_track(x, y):  #function to track pointer
            y = int(np.around(y))
            text = hv.Text(x, y, str(y), halign='left', valign='bottom')
            return hv.HLine(y) * text

        track = hv.DynamicMap(h_track, streams=[pointerXY_stream])

        def h_line(data):  #function to draw line
            try:
                hline = hv.HLine(data['y'][0])
                return hline
            except (TypeError, KeyError, IndexError):
                hline = hv.HLine(0)
                return hline

        line = hv.DynamicMap(h_line, streams=[pointDraw_stream])

        def h_text(data):  #function to write ycrop value
            center = gray.shape[1] // 2
            try:
                y = int(np.around(data['y'][0]))
                htext = hv.Text(center, y + 10, 'ycrop: {x}'.format(x=y))
                return htext
            except (TypeError, KeyError, IndexError):
                htext = hv.Text(center, 10, 'ycrop: 0')
                return htext

        text = hv.DynamicMap(h_text, streams=[pointDraw_stream])

        return image * track * points * line * text, pointDraw_stream, fpath
Example #9
    def view(
        self,
        *,
        channels,
        rscale=None,
        gscale=None,
        bscale=None,
        percentile=98,
        show_miniview=True,
        height=600,
        resolution=900,
    ):
        self.channels = channels
        self.resolution = resolution
        self.rscale = rscale or self.ds.display_interval(
            channels[0], percentile)
        self.gscale = gscale or self.ds.display_interval(
            channels[1], percentile)
        self.bscale = bscale or self.ds.display_interval(
            channels[2], percentile)

        self.setup_streams()
        self.setup_controller(channels=channels)
        self.setup_template(height=height)

        tooltips = [
            ("x", "$x{(0)}"),
            ("y", "$y{(0)}"),
        ]
        hover = HoverTool(tooltips=tooltips)
        self.main = self.mainview().opts(
            clone=True,
            responsive=True,
            hooks=[remove_bokeh_logo],
            default_tools=[hover],
            title=f"Sample: {self.name}",
        )

        boxes = hv.Rectangles([])
        self.box_stream = streams.BoxEdit(
            source=boxes,
            styles={"fill_color": ["yellow", "red", "green", "blue", "cyan"]},
        )
        boxes = boxes.opts(hv.opts.Rectangles(active_tools=[], fill_alpha=0.5))

        overlay = hd.regrid(hv.Image([]), streams=[self.pointer])

        if show_miniview:
            mini = (self.miniview().clone(link=False).opts(
                width=200,
                height=200,
                xaxis=None,
                yaxis=None,
                default_tools=[],
                shared_axes=False,
                hooks=[remove_bokeh_logo],
            ))
            zoom = self.zoomview().opts(
                width=200,
                height=200,
                xaxis=None,
                yaxis=None,
                default_tools=[],
                shared_axes=False,
                hooks=[remove_bokeh_logo],
            )
            RangeToolLink(mini, self.main, axes=["x", "y"])
            self.tmpl.add_panel(
                "A",
                pn.Row(
                    pn.panel(self.main * overlay * boxes),
                    pn.Column(pn.panel(mini), pn.panel(zoom)),
                    width=400,
                    height=280,
                    sizing_mode="scale_both",
                ),
            )
        else:
            self.tmpl.add_panel("A", pn.Row(pn.panel(self.main)))
        self.tmpl.add_panel("C", self.controller)
        return self.tmpl
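The drawn rectangles can later be read back from `self.box_stream.data`; a hedged sketch of such an export helper (`selected_regions` and `viewer` are illustrative names, not part of the original class):

import pandas as pd

def selected_regions(viewer):
    # sketch: collect the BoxEdit corners into a tidy table for export
    data = viewer.box_stream.data
    if not data or not data.get('x0'):
        return pd.DataFrame(columns=['x0', 'y0', 'x1', 'y1'])
    return pd.DataFrame({k: data[k] for k in ('x0', 'y0', 'x1', 'y1')})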