def LiveTracks(Raw, Seg, scale, locationID, RegionID, VolumeID, ID, StartID):
    with napari.gui_qt():
        if Raw is not None:
            viewer = napari.view_image(Raw, scale=scale, name='Image')
            Labels = viewer.add_labels(Seg, scale=scale, name='SegImage')
        else:
            viewer = napari.view_image(Seg, scale=scale, name='SegImage')
        trackbox = QComboBox()
        trackbox.addItem(Boxname)
        for i in range(0, len(ID)):
            trackbox.addItem(str(ID[i]))
        figure = plt.figure(figsize=(4, 4))
        multiplot_widget = FigureCanvas(figure)
        ax = multiplot_widget.figure.subplots(2, 2)
        viewer.window.add_dock_widget(multiplot_widget, name="TrackStats", area='right')
        multiplot_widget.figure.tight_layout()
        trackbox.currentIndexChanged.connect(
            lambda trackid=trackbox: TrackViewer(
                viewer, Raw, Seg, locationID, RegionID, VolumeID, scale,
                trackbox.currentText(), StartID, multiplot_widget, ax))
        viewer.window.add_dock_widget(trackbox, name="TrackID", area='left')
def get_registering_points(reference, registering, fixed_points):
    print(
        "\n\nStarting registration: click on the points in the registering image "
        "that match those on the reference image.\n"
        "Press 'q' to close the viewers.")
    # Create reference image with marked points
    reference_marked = create_marked_ref_image(reference.copy(), fixed_points)
    # Reshape registering image to reference image shape
    cols, rows, chs = reference.shape
    registering = cv2.resize(registering, (rows, cols))
    with napari.gui_qt():
        # add the registering image
        registering_viewer = napari.view_image(registering, name='registering',
                                               title='Registering points')
        # registering_viewer.window._qt_window.showFullScreen()
        registering_viewer.cursor = 'pointing'
        # User input -> points
        points_layer = registering_viewer.add_points(size=POINTS_SIZE,
                                                     edge_color='k',
                                                     edge_width=EDGE_WIDTH,
                                                     face_color='springgreen',
                                                     name='registering_points')
        points_layer.mode = 'add'
        registering_viewer.active_layer = points_layer
        registering_viewer.cursor = 'pointing'
        # create a viewer for the reference image
        reference_viewer = napari.view_image(reference_marked, name='reference',
                                             title='Fixed points')
        viewers = [reference_viewer, registering_viewer]

        # Add keybindings
        @napari.Viewer.bind_key('q', overwrite=True)
        def close_viewer(viewer):
            for viewer in viewers:
                viewer.close()

        # Close both viewers automatically once enough points were clicked
        @points_layer.mouse_drag_callbacks.append
        def check_min_n_points(layer, event):
            if len(points_layer.data) == len(fixed_points):
                for viewer in viewers:
                    viewer.close()

    if len(points_layer.data) != len(fixed_points):
        raise ValueError(
            f"{len(points_layer.data)} points were clicked, but there were "
            f"{len(fixed_points)} on the reference image.\n"
            "Please try again.")
    return clean_check_points(
        points_layer.data, img_type='Registering'
    )[::-1]  # ? need to reverse the order for some reason
def napari_show(dat, contrast_limits=None):
    import napari
    if contrast_limits is None:
        contrast_limits = [dat[0].min(), dat[0].max()]
    with napari.gui_qt():
        try:
            napari.view_image(dat, contrast_limits=contrast_limits,
                              is_pyramid=False)
        except TypeError:
            # napari 0.3.5+ no longer accepts the is_pyramid keyword
            napari.view_image(dat, contrast_limits=contrast_limits)
def viewCORstack(flip=True):
    '''Look at all the coherence images with napari.'''
    import numpy as np
    import isceobj
    import napari
    params = np.load('params.npy', allow_pickle=True).item()
    gam = np.load('gam.npy')
    stack = np.zeros((len(params['pairs']), params['nyl'], params['nxl']))
    for ii in range(len(params['pairs'])):
        p = params['pairs'][ii]
        f = './merged/interferograms/' + p + '/cor_lk.r4'
        intImage = isceobj.createImage()
        intImage.dataType = 'FLOAT'
        intImage.load(f + '.xml')
        unw = intImage.memMap()[:, :, 0]
        unw = unw.copy()
        unw[gam == 0] = 0
        if flip:
            stack[ii, :, :] = np.flipud(unw)
        else:
            stack[ii, :, :] = unw
    viewer = napari.view_image(stack, colormap='jet')
def plot_napari(img):
    """Visualize 3D segmentation results via the napari viewer."""
    import napari
    from scipy import ndimage as ndi
    viewer = napari.view_image(img, name='cells')
    labeled = ndi.label(img)[0]
    viewer.add_labels(labeled, name='cells_colored')
def look_at_video(filepath, rgb=False, cmap="gray"):
    # Use this function to look at a single video in napari.
    # filepath should be the path to a memmapped file.
    images = load_mmapped(filepath)
    with napari.gui_qt():
        viewer = napari.view_image(images, rgb=rgb, colormap=cmap)
def Annotate(Raw, SegImage):
    with napari.gui_qt():
        viewer = napari.view_image(Raw, name='ThreeDimage')
        viewer.add_image(SegImage)
        pts_layer = viewer.add_points(size=5)
        pts_layer.mode = 'add'
    return pts_layer
def show_segmentation(image, label1=None, label2=None):
    with napari.gui_qt():
        # viewer = napari.Viewer()
        viewer = napari.view_image(image, rgb=False)
        # viewer.add_image(array, rgb=False, colormap="green")
        if label1 is not None:
            viewer.add_labels(label1.astype(np.uint8), name='segmentation1')
        if label2 is not None and np.any(label2):
            viewer.add_labels(label2.astype(np.uint8), name='segmentation2')
def test_view_multichannel(qtbot):
    """Test adding a multichannel image."""
    np.random.seed(0)
    data = np.random.random((15, 10, 5))
    viewer = napari.view_image(data, channel_axis=-1, show=False)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert np.all(viewer.layers[i].data == data.take(i, axis=-1))
    viewer.close()
def show_surface():
    model = _model3d()
    img, mask = test_image_nuclei_3d(return_mask=True)
    x = normalize(img, 1, 99.8)
    labels, polys = model.predict_instances(x)
    surface = surface_from_polys(polys)
    # add the surface
    viewer = napari.view_image(img)
    viewer.add_surface(surface)
    return viewer
def __getitem__(self, index):
    sample, subject = self.load_subject_(index)
    transformed_ = self.transform(subject)
    if self.opt.visualize_volume:
        with napari.gui_qt():
            napari.view_image(np.stack([transformed_['mr'].data.squeeze().numpy(),
                                        transformed_['trus'].data.squeeze().numpy()]))
    dict_ = {
        'A': transformed_['mr'].data[:, :self.input_size[0], :self.input_size[1], :self.input_size[2]],
        'B': transformed_['trus'].data[:, :self.input_size[0], :self.input_size[1], :self.input_size[2]],
        # 'Patient': sample.split('/')[-4].replace(' ', ''),
        'A_paths': sample + "/mr.mhd",
        'B_paths': sample + "/trus.mhd",
    }
    if self.load_mask:
        dict_['A_mask'] = transformed_['mr_tree'].data[:, :self.input_size[0], :self.input_size[1], :self.input_size[2]]
    return dict_
def test_view_image(qtbot):
    """Test adding image."""
    np.random.seed(0)
    data = np.random.random((10, 15))
    viewer = napari.view_image(data)
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)

    assert np.all(viewer.layers[0].data == data)
    assert len(viewer.layers) == 1
    assert view.layers.vbox_layout.count() == 2 * len(viewer.layers) + 2
    assert viewer.dims.ndim == 2
    assert view.dims.nsliders == viewer.dims.ndim
    assert np.sum(view.dims._displayed_sliders) == 0

    # Close the viewer
    viewer.window.close()

    data = np.random.random((10, 15, 20))
    viewer = napari.view_image(data)
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)
    viewer.dims.ndisplay = 3

    assert np.all(viewer.layers[0].data == data)
    assert len(viewer.layers) == 1
    assert view.layers.vbox_layout.count() == 2 * len(viewer.layers) + 2
    assert viewer.dims.ndim == 3
    assert view.dims.nsliders == viewer.dims.ndim
    assert np.sum(view.dims._displayed_sliders) == 0

    # Close the viewer
    viewer.window.close()
def run(obj: pathlib.Path, colls: pathlib.Path):
    colls = load_collisions(colls)
    colls_hist = histogram_colls_3d(colls)
    colls_hist /= colls_hist.sum()
    surface = create_napari_surface(obj)
    colormap = vispy.color.Colormap(plt.cm.gist_yarg(np.linspace(0, 1, 256)))
    with napari.gui_qt():
        v = napari.view_image(colls_hist, ndisplay=3, rgb=False, colormap=colormap)
        v.theme = 'light'
        v.add_surface(surface, colormap='magenta')
        img = v.screenshot()
        # imageio.imwrite('/data/neural_collision_detection/results/for_article/fig1/toy_neuron_only_collisions.png', img[:, :, :3], transparency=(255, 255, 255), dpi=(300, 300), prefer_uint8=False)
        make_and_save_colorbar(img, colls_hist.min(), colls_hist.max())
def test_view_multichannel(qtbot):
    """Test adding a multichannel image."""
    np.random.seed(0)
    data = np.random.random((15, 10, 5))
    viewer = napari.view_image(data, channel_axis=-1)
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)
    assert len(viewer.layers) == data.shape[-1]
    for i in range(data.shape[-1]):
        assert np.all(viewer.layers[i].data == data.take(i, axis=-1))
    # Close the viewer
    viewer.window.close()
def test_napari(ds_src_dir, ds_dst_dir):
    # store_as = "mip_xy"
    store_as = "raw"

    # reload
    logger.info(f'load dataset with "{store_as}"')
    ds = ZarrDataset.load(ds_dst_dir, label=store_as)

    iterator = TiledDatasetIterator(ds, axes="zyx", return_key=False)
    for i, uuid in enumerate(iterator):
        data = ds[uuid]
        with napari.gui_qt():
            viewer = napari.view_image(data, scale=ds.voxel_size)
        raise RuntimeError("DEBUG")
def main(args):
    image = np.asarray(Image.open(args[1]))
    default_sigma = 5.0

    with napari.gui_qt():
        viewer = napari.view_image(image, rgb=True)
        livewire = LiveWire(color.rgb2lab(image), sigma=default_sigma)
        layer = viewer.add_labels(livewire.contour, color={1: 'cyan'},
                                  name='contour', opacity=1.0)

        def valid(coords):
            return (0 <= round(coords[0]) < image.shape[0]
                    and 0 <= round(coords[1]) < image.shape[1])

        @layer.mouse_move_callbacks.append
        def mouse_move(layer, event):
            coords = layer.coordinates
            if valid(coords):
                livewire.select(coords)
                layer.data = livewire.contour

        @layer.mouse_drag_callbacks.append
        def mouse_click(layer, event):
            livewire.confirm()

        @viewer.bind_key('s')
        def close_contour(viewer):
            livewire.close()
            layer.data = livewire.contour

        @magicgui(auto_call=True,
                  sigma={'widget_type': QDoubleSpinBox,
                         'maximum': 255,
                         'minimum': 0.01,
                         'singleStep': 5.0})
        def update_sigma(sigma: float = default_sigma):
            livewire.sigma = sigma

        sigma_box = update_sigma.Gui()
        viewer.window.add_dock_widget(sigma_box, area='left')
        viewer.layers.events.changed.connect(
            lambda x: sigma_box.refresh_choices())
def run(obj: pathlib.Path, colls: pathlib.Path):
    colls = load_collisions(colls)
    colls_hist = histogram_colls_3d(colls)
    colls_hist /= colls_hist.sum()
    surface = create_napari_surface(obj)
    colormap = vispy.color.Colormap(plt.cm.gist_yarg(np.linspace(0, 1, 256)))
    with napari.gui_qt():
        v = napari.view_image(colls_hist, ndisplay=3, rgb=False, colormap=colormap)
        v.theme = 'light'
        v.add_surface(surface, colormap='magenta')
        img = v.screenshot()
        # imageio.imsave('/data/neural_collision_detection/results/for_article/supp_colls/toy_neuron_with_collisions.png', img)
        fig, ax = plt.subplots(figsize=(8, 8))
        ax.imshow(img, cmap='gist_yarg')
        ax.axis('off')
        fig.colorbar(plt.cm.ScalarMappable(
            cmap=plt.cm.gist_yarg,
            norm=plt.Normalize(vmin=colls_hist.min(), vmax=colls_hist.max())))
        plt.show()
        fig.savefig("/data/neural_collision_detection/results/for_article/supp_colls/supp_colls.png",
                    transparent=True, dpi=300)
def main(args):
    image = nib.load(args[1]).get_fdata()
    with napari.gui_qt():
        viewer = napari.view_image(image)
        blank = np.zeros(image.shape, dtype=int)
        viewer.add_image(blank, name='path-value', visible=False, opacity=0.5)
        viewer.add_labels(blank, name='labels', opacity=0.5)
        viewer.add_labels(blank, name='markers', opacity=1)
        # @magicgui(call_button='Segment')
        # def _segment():
        #     segment(viewer)
        # @magicgui(call_button='Save Markers')
        # def _save_markers(path: str):
        #     save_markers(viewer, path)
        # viewer.window.add_dock_widget([_segment, _save_markers], area='left')
        viewer.window.add_dock_widget([], area='left')
def image_thresh(image):
    # image = rgb2gray(io.imread(im))
    if image.shape[0] > 4000:
        image = rescale(image, 0.5, preserve_range=True, anti_aliasing=True)
    image = np.uint8(image)

    def threshold(image, t):
        arr = da.from_array(image, chunks=image.shape)
        return arr > t

    all_thresholds = da.stack([threshold(image, t) for t in np.arange(255)])
    viewer = napari.view_image(image, name='input image')
    viewer.add_image(all_thresholds, name='thresholded',
                     colormap='magenta', blending='additive')
def test_view_pyramid(qtbot):
    """Test adding image pyramid."""
    shapes = [(40, 20), (20, 10), (10, 5)]
    np.random.seed(0)
    data = [np.random.random(s) for s in shapes]
    viewer = napari.view_image(data, is_pyramid=True)
    view = viewer.window.qt_viewer
    qtbot.addWidget(view)

    assert np.all(viewer.layers[0].data == data)
    assert len(viewer.layers) == 1
    assert view.layers.vbox_layout.count() == 2 * len(viewer.layers) + 2
    assert viewer.dims.ndim == 2
    assert view.dims.nsliders == viewer.dims.ndim
    assert np.sum(view.dims._displayed_sliders) == 0

    # Close the viewer
    viewer.window.close()
def apply_limit(self, lim='bottom'):
    print(np.shape(self.mesh))
    if lim == 'bottom':
        max = int(np.max(self.mesh))
        print(max)
        for i in range(self.dim):
            for j in range(self.dim):
                self.data[j, i, int(self.mesh[j, i]):] = 0
        self.data = self.data[:, :, 0:max]
    if lim == 'top':
        min = int(np.min(self.mesh))
        for i in range(self.dim):
            for j in range(self.dim):
                self.data[j, i, :int(self.mesh[j, i])] = 0
        self.data = self.data[:, :, min:]
    with napari.gui_qt():
        viewer = napari.view_image(self.data)
def main(biomarker, filename, tablename, downscale_ratio, thres_range):
    table = pd.read_csv(tablename, index_col='ID')
    col = table.loc[:, biomarker].values
    image = img_as_ubyte(tifffile.imread(filename))
    image = downscale_local_mean(image, (downscale_ratio, downscale_ratio))
    centers = []
    for t in thres_range:
        center = table.loc[col > t / 100,
                           ['centroid_y', 'centroid_x']].values // downscale_ratio
        center = np.concatenate((np.ones((center.shape[0], 1)) * t, center),
                                axis=1).astype(int)
        if center.shape[0] != 0:
            centers.extend(center)
    centers = np.array(centers)
    with napari.gui_qt():
        viewer = napari.view_image(image, name='biomarker', colormap='green')
        viewer.add_points(centers, size=[0, 3, 3], n_dimensional=True)
def viewIFGstack(flip=True):
    '''Look at all the ifgs with napari.'''
    import numpy as np
    import isceobj
    import napari
    params = np.load('params.npy', allow_pickle=True).item()
    stack = np.zeros((len(params['pairs']), params['nyl'], params['nxl']))
    for ii in range(len(params['pairs'])):
        p = params['pairs'][ii]
        f = './merged/interferograms/' + p + '/fine_lk_filt.int'
        intImage = isceobj.createIntImage()
        intImage.load(f + '.xml')
        ifg = intImage.memMap()[:, :, 0]
        ifgc = np.angle(ifg)
        if flip:
            stack[ii, :, :] = np.flipud(ifgc)
        else:
            stack[ii, :, :] = ifgc
    viewer = napari.view_image(stack, colormap='RdYlBu')
def get_fixed_points(reference):
    print(
        f"\n\nDefine at least {MIN_N_POINTS} fixed points on the reference image.\n"
        "Press 'q' to close the viewer when all the points are defined.")
    if isinstance(reference, str):
        reference = load_image(reference)
    with napari.gui_qt():
        reference_viewer = napari.view_image(reference,
                                             title='Get Fixed Points',
                                             name='reference')
        reference_viewer.window._qt_window.showFullScreen()
        points_layer = reference_viewer.add_points(size=POINTS_SIZE,
                                                   edge_color='k',
                                                   edge_width=EDGE_WIDTH,
                                                   face_color='red',
                                                   name='fixed_points')
        points_layer.mode = 'add'

        # Add keybindings
        @reference_viewer.bind_key('q', overwrite=True)
        def close_viewer(viewer):
            # Close viewer
            viewer.close()

        @points_layer.mouse_drag_callbacks.append
        def _print(layer, event):
            point, n_points = layer.data[-1].astype(np.int32), len(layer.data)
            print(f" added point ({point}). Tot points: {n_points}")

    points = [list(p) for p in clean_check_points(points_layer.data)]
    print(f"{len(points)} points were defined.\n")
    return points
from skimage import data
import napari

with napari.gui_qt():
    viewer = napari.view_image(data.astronaut(), rgb=True)

    ################## KEYBINDINGS ##################

    # print the name of each layer on the terminal
    @viewer.bind_key('p')
    def print_names(viewer):
        print([layer.name for layer in viewer.layers])

    # print the message 'hello' on the terminal when the key is pressed
    # (all instructions before the yield statement);
    # on key release, print the message 'goodbye'
    # (all instructions after the yield statement)
    @viewer.bind_key('m')
    def print_message(viewer):
        print('hello')
        yield
        print('goodbye')
across the dimensions, specified by their size
"""
import numpy as np
from skimage import data
import napari

blobs = np.stack(
    [
        data.binary_blobs(
            length=128, blob_size_fraction=0.05, n_dim=3, volume_fraction=f)
        for f in np.linspace(0.05, 0.5, 10)
    ],
    axis=0,
)
viewer = napari.view_image(blobs.astype(float))

# add the points
points = np.array(
    [
        [0, 0, 100, 100],
        [0, 0, 50, 120],
        [1, 0, 100, 40],
        [2, 10, 110, 100],
        [9, 8, 80, 100],
    ],
    dtype=float,
)
viewer.add_points(points, size=[0, 6, 10, 10], face_color='blue',
                  out_of_slice_display=True)
add_image APIs. When the window is closed it will print the coordinates of your shapes.
"""
import numpy as np
from skimage import data
import napari

with napari.gui_qt():
    # create the viewer and window
    viewer = napari.Viewer()

    # add the image
    photographer = data.camera()
    image_layer = napari.view_image(photographer, name='photographer')

    # create a list of polygons
    polygons = [
        np.array([[11, 13], [111, 113], [22, 246]]),
        np.array(
            [
                [505, 60],
                [402, 71],
                [383, 42],
                [251, 95],
                [212, 59],
                [131, 137],
                [126, 187],
                [191, 204],
                [171, 248],
TEST_SIZE = 64

s_psf = optics.gaussian_psf(numerical_aperture=0.3,
                            npix_axial=TEST_SIZE + 1,
                            npix_lateral=TEST_SIZE + 1)
i_psf = inverse_psf_rfft(s_psf, l=1e-15, mode='constant')

psfft = fft.rfft2(s_psf.sum(0))
dirac = fft.irfft2(psfft * i_psf, s=s_psf.shape[1:])

sample = primitives.boccia(TEST_SIZE, radius=(0.8 * TEST_SIZE) // 2, n_stripes=4)
s_theta = np.arange(90)
s_radon = spl.radon(sample, theta=s_theta, circle=True)
s_fpsopt = imaging.fps_opt(sample, s_psf, theta=s_theta)
s_deconv = deconvolve_sinogram(s_fpsopt, s_psf, l=0)

viewer = napari.view_image(s_radon)
viewer.add_image(s_fpsopt)
viewer.add_image(s_deconv)

viewer = napari.view_image(fft.fftshift(np.abs(i_psf), 0), name='inverse PSF FFT')
viewer.add_image(dirac)

napari.run()
""" Displays an image pyramid """ from skimage import data from skimage.util import img_as_ubyte from skimage.color import rgb2gray from skimage.transform import pyramid_gaussian import napari import numpy as np # create pyramid from astronaut image base = np.tile(data.astronaut(), (8, 8, 1)) pyramid = list( pyramid_gaussian(base, downscale=2, max_layer=4, multichannel=True)) print('pyramid level shapes: ', [p.shape[:2] for p in pyramid]) with napari.gui_qt(): # add image pyramid napari.view_image(pyramid, is_pyramid=True)
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.backends.backend_qt5agg import FigureCanvas

import napari

# create image
x = np.linspace(0, 5, 256)
y = np.linspace(0, 5, 256)[:, np.newaxis]
img = np.sin(x) ** 10 + np.cos(10 + y * x) * np.cos(x)

# add it to the viewer
viewer = napari.view_image(img, colormap='viridis')
layer = viewer.layers[-1]

# create mpl figure with subplots
mpl_fig = plt.figure()
ax = mpl_fig.add_subplot(111)
(line,) = ax.plot(layer.data[123])  # linescan through the middle of the image

# add the figure to the viewer as a FigureCanvas widget
viewer.window.add_dock_widget(FigureCanvas(mpl_fig))

# connect a callback that updates the line plot when
# the user clicks on the image
@layer.mouse_drag_callbacks.append
def profile_lines_drag(layer, event):
    try:
        line.set_ydata(layer.data[int(event.position[0])])
        line.figure.canvas.draw()